hash | content |
---|---|
0879c936f4d2f5a563557bb46e7f858a243e28d3eed878f87b39a677365116fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
from astropy import table
from astropy.io import fits
from ...types import AstropyAsdfType
class FitsType(AstropyAsdfType):
name = 'fits/fits'
types = ['astropy.io.fits.HDUList']
requires = ['astropy']
@classmethod
def from_tree(cls, data, ctx):
hdus = []
first = True
for hdu_entry in data:
header = fits.Header([fits.Card(*x) for x in hdu_entry['header']])
data = hdu_entry.get('data')
if data is not None:
try:
data = data.__array__()
except ValueError:
data = None
if first:
hdu = fits.PrimaryHDU(data=data, header=header)
first = False
            elif data is not None and data.dtype.names is not None:
hdu = fits.BinTableHDU(data=data, header=header)
else:
hdu = fits.ImageHDU(data=data, header=header)
hdus.append(hdu)
hdulist = fits.HDUList(hdus)
return hdulist
@classmethod
def to_tree(cls, hdulist, ctx):
units = []
for hdu in hdulist:
header_list = []
for card in hdu.header.cards:
                if card.comment:
                    new_card = [card.keyword, card.value, card.comment]
                elif card.value:
                    new_card = [card.keyword, card.value]
                elif card.keyword:
                    new_card = [card.keyword]
                else:
                    new_card = []
header_list.append(new_card)
hdu_dict = {}
hdu_dict['header'] = header_list
if hdu.data is not None:
if hdu.data.dtype.names is not None:
data = table.Table(hdu.data)
else:
data = hdu.data
hdu_dict['data'] = yamlutil.custom_tree_to_tagged_tree(data, ctx)
units.append(hdu_dict)
return units
@classmethod
def reserve_blocks(cls, data, ctx):
for hdu in data:
if hdu.data is not None:
yield ctx.blocks.find_or_create_block_for_array(hdu.data, ctx)
@classmethod
def assert_equal(cls, old, new):
for hdua, hdub in zip(old, new):
assert_array_equal(hdua.data, hdub.data)
for carda, cardb in zip(hdua.header.cards, hdub.header.cards):
assert tuple(carda) == tuple(cardb)
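# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal, self-contained demonstration of the header round-trip performed
# by to_tree/from_tree above: each card is flattened to a
# [keyword, value, comment] list with empty trailing fields dropped, then
# rebuilt with fits.Card(*x).
def _header_roundtrip_example():
    header = fits.Header()
    header['OBSERVER'] = ('E. Hubble', 'name of observer')
    header['EXPTIME'] = 300.0
    header_list = []
    for card in header.cards:
        if card.comment:
            header_list.append([card.keyword, card.value, card.comment])
        elif card.value:
            header_list.append([card.keyword, card.value])
        elif card.keyword:
            header_list.append([card.keyword])
        else:
            header_list.append([])
    rebuilt = fits.Header([fits.Card(*x) for x in header_list])
    assert rebuilt['OBSERVER'] == header['OBSERVER']
    assert rebuilt.comments['OBSERVER'] == 'name of observer'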
|
967e3aedc0b4ef8286f13233c2b02d21502c66f3bc999e16cf85b573c1ee9daf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import tagged, yamlutil
from astropy.modeling import mappings
from astropy.utils import minversion
from astropy.modeling import functional_models
from ...types import AstropyAsdfType
__all__ = ['TransformType', 'IdentityType', 'ConstantType', 'DomainType']
class TransformType(AstropyAsdfType):
version = '1.1.0'
requires = ['astropy']
@classmethod
def _from_tree_base_transform_members(cls, model, node, ctx):
if 'inverse' in node:
model.inverse = yamlutil.tagged_tree_to_custom_tree(
node['inverse'], ctx)
if 'name' in node:
model = model.rename(node['name'])
# TODO: Remove domain in a later version.
if 'domain' in node:
model.bounding_box = cls._domain_to_bounding_box(node['domain'])
elif 'bounding_box' in node:
model.bounding_box = node['bounding_box']
return model
@classmethod
def _domain_to_bounding_box(cls, domain):
bb = tuple([(item['lower'], item['upper']) for item in domain])
if len(bb) == 1:
bb = bb[0]
return bb
@classmethod
def from_tree_transform(cls, node, ctx):
raise NotImplementedError(
"Must be implemented in TransformType subclasses")
@classmethod
def from_tree(cls, node, ctx):
model = cls.from_tree_transform(node, ctx)
model = cls._from_tree_base_transform_members(model, node, ctx)
return model
@classmethod
def _to_tree_base_transform_members(cls, model, node, ctx):
if getattr(model, '_user_inverse', None) is not None:
node['inverse'] = yamlutil.custom_tree_to_tagged_tree(
model._user_inverse, ctx)
if model.name is not None:
node['name'] = model.name
try:
bb = model.bounding_box
except NotImplementedError:
bb = None
if bb is not None:
if model.n_inputs == 1:
bb = list(bb)
else:
                bb = [list(item) for item in bb]
node['bounding_box'] = bb
@classmethod
def to_tree_transform(cls, model, ctx):
raise NotImplementedError("Must be implemented in TransformType subclasses")
@classmethod
def to_tree(cls, model, ctx):
node = cls.to_tree_transform(model, ctx)
cls._to_tree_base_transform_members(model, node, ctx)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
assert a.name == b.name
# TODO: Assert inverses are the same
class IdentityType(TransformType):
name = "transform/identity"
types = ['astropy.modeling.mappings.Identity']
@classmethod
def from_tree_transform(cls, node, ctx):
return mappings.Identity(node.get('n_dims', 1))
@classmethod
def to_tree_transform(cls, data, ctx):
node = {}
if data.n_inputs != 1:
node['n_dims'] = data.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, mappings.Identity) and
isinstance(b, mappings.Identity) and
a.n_inputs == b.n_inputs)
class ConstantType(TransformType):
name = "transform/constant"
types = ['astropy.modeling.functional_models.Const1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Const1D(node['value'])
@classmethod
def to_tree_transform(cls, data, ctx):
return {
'value': data.amplitude.value
}
class DomainType(AstropyAsdfType):
# TODO: Is this used anywhere? Can it be removed?
name = "transform/domain"
version = '1.0.0'
@classmethod
def from_tree(cls, node, ctx):
return node
@classmethod
def to_tree(cls, data, ctx):
return data
class GenericModel(mappings.Mapping):
def __init__(self, n_inputs, n_outputs):
mapping = tuple(range(n_inputs))
super(GenericModel, self).__init__(mapping)
        self._outputs = tuple('x' + str(idx) for idx in range(n_outputs))
class GenericType(TransformType):
name = "transform/generic"
types = [GenericModel]
@classmethod
def from_tree_transform(cls, node, ctx):
return GenericModel(
node['n_inputs'], node['n_outputs'])
@classmethod
def to_tree_transform(cls, data, ctx):
return {
'n_inputs': data.n_inputs,
'n_outputs': data.n_outputs
}
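# --- Usage sketch (illustrative only, not part of the original module) ---
# How the legacy 'domain' entry maps onto bounding_box, per
# TransformType._domain_to_bounding_box above: each {'lower', 'upper'} dict
# becomes a (lower, upper) tuple, and a single interval collapses to a bare
# tuple. Call directly to check.
def _domain_example():
    domain = [{'lower': 0, 'upper': 10}, {'lower': -5, 'upper': 5}]
    assert TransformType._domain_to_bounding_box(domain) == ((0, 10), (-5, 5))
    assert TransformType._domain_to_bounding_box(domain[:1]) == (0, 10)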
|
26ab077fc39b38d1b9a18e66faac498da206e5fc25b4e70b3423fa10c5c6fdef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from asdf import yamlutil
from astropy import modeling
from .basic import TransformType
__all__ = ['AffineType', 'Rotate2DType', 'Rotate3DType']
class AffineType(TransformType):
name = "transform/affine"
types = ['astropy.modeling.projections.AffineTransformation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
matrix = node['matrix']
translation = node['translation']
if matrix.shape != (2, 2):
raise NotImplementedError(
"asdf currently only supports 2x2 (2D) rotation transformation "
"matrices")
if translation.shape != (2,):
raise NotImplementedError(
"asdf currently only supports 2D translation transformations.")
return modeling.projections.AffineTransformation2D(
matrix=matrix, translation=translation)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'matrix': model.matrix.value, 'translation': model.translation.value}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (a.__class__ == b.__class__)
assert_array_equal(a.matrix, b.matrix)
assert_array_equal(a.translation, b.translation)
class Rotate2DType(TransformType):
name = "transform/rotate2d"
types = ['astropy.modeling.rotations.Rotation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return modeling.rotations.Rotation2D(node['angle'])
@classmethod
def to_tree_transform(cls, model, ctx):
return {'angle': model.angle.value}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.rotations.Rotation2D) and
isinstance(b, modeling.rotations.Rotation2D))
assert_array_equal(a.angle, b.angle)
class Rotate3DType(TransformType):
name = "transform/rotate3d"
types = ['astropy.modeling.rotations.RotateNative2Celestial',
'astropy.modeling.rotations.RotateCelestial2Native',
'astropy.modeling.rotations.EulerAngleRotation']
@classmethod
def from_tree_transform(cls, node, ctx):
if node['direction'] == 'native2celestial':
return modeling.rotations.RotateNative2Celestial(node["phi"],
node["theta"],
node["psi"])
elif node['direction'] == 'celestial2native':
return modeling.rotations.RotateCelestial2Native(node["phi"],
node["theta"],
node["psi"])
else:
return modeling.rotations.EulerAngleRotation(node["phi"],
node["theta"],
node["psi"],
axes_order=node["direction"])
    @classmethod
    def to_tree_transform(cls, model, ctx):
        if isinstance(model, modeling.rotations.RotateNative2Celestial):
            direction = "native2celestial"
        elif isinstance(model, modeling.rotations.RotateCelestial2Native):
            direction = "celestial2native"
        else:
            return {"phi": model.phi.value,
                    "theta": model.theta.value,
                    "psi": model.psi.value,
                    "direction": model.axes_order
                    }
        # The angles may carry a .value attribute or be bare values; fall
        # back to the bare attributes if the former is unavailable.
        try:
            return {"phi": model.lon.value,
                    "theta": model.lat.value,
                    "psi": model.lon_pole.value,
                    "direction": direction
                    }
        except AttributeError:
            return {"phi": model.lon,
                    "theta": model.lat,
                    "psi": model.lon_pole,
                    "direction": direction
                    }
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
if a.__class__.__name__ == "EulerAngleRotation":
assert_array_equal(a.phi, b.phi)
assert_array_equal(a.psi, b.psi)
assert_array_equal(a.theta, b.theta)
else:
assert_array_equal(a.lon, b.lon)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon_pole, b.lon_pole)
class GenericProjectionType(TransformType):
@classmethod
def from_tree_transform(cls, node, ctx):
args = []
for param_name, default in cls.params:
args.append(node.get(param_name, default))
if node['direction'] == 'pix2sky':
return cls.types[0](*args)
else:
return cls.types[1](*args)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if isinstance(model, cls.types[0]):
node['direction'] = 'pix2sky'
else:
node['direction'] = 'sky2pix'
for param_name, default in cls.params:
val = getattr(model, param_name).value
if val != default:
node[param_name] = val
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
_generic_projections = {
'zenithal_perspective': ('ZenithalPerspective', (('mu', 0.0), ('gamma', 0.0))),
'gnomonic': ('Gnomonic', ()),
'stereographic': ('Stereographic', ()),
'slant_orthographic': ('SlantOrthographic', (('xi', 0.0), ('eta', 0.0))),
'zenithal_equidistant': ('ZenithalEquidistant', ()),
'zenithal_equal_area': ('ZenithalEqualArea', ()),
'airy': ('Airy', (('theta_b', 90.0),)),
'cylindrical_perspective': ('CylindricalPerspective', (('mu', 0.0), ('lam', 0.0))),
'cylindrical_equal_area': ('CylindricalEqualArea', (('lam', 0.0),)),
'plate_carree': ('PlateCarree', ()),
'mercator': ('Mercator', ()),
'sanson_flamsteed': ('SansonFlamsteed', ()),
'parabolic': ('Parabolic', ()),
'molleweide': ('Molleweide', ()),
'hammer_aitoff': ('HammerAitoff', ()),
'conic_perspective': ('ConicPerspective', (('sigma', 0.0), ('delta', 0.0))),
'conic_equal_area': ('ConicEqualArea', (('sigma', 0.0), ('delta', 0.0))),
'conic_equidistant': ('ConicEquidistant', (('sigma', 0.0), ('delta', 0.0))),
'conic_orthomorphic': ('ConicOrthomorphic', (('sigma', 0.0), ('delta', 0.0))),
'bonne_equal_area': ('BonneEqualArea', (('theta1', 0.0),)),
'polyconic': ('Polyconic', ()),
'tangential_spherical_cube': ('TangentialSphericalCube', ()),
'cobe_quad_spherical_cube': ('COBEQuadSphericalCube', ()),
'quad_spherical_cube': ('QuadSphericalCube', ()),
'healpix': ('HEALPix', (('H', 4.0), ('X', 3.0))),
'healpix_polar': ('HEALPixPolar', ())
}
def make_projection_types():
for tag_name, (name, params) in _generic_projections.items():
class_name = '{0}Type'.format(name)
types = ['astropy.modeling.projections.Pix2Sky_{0}'.format(name),
'astropy.modeling.projections.Sky2Pix_{0}'.format(name)]
globals()[class_name] = type(
str(class_name),
(GenericProjectionType,),
{'name': 'transform/{0}'.format(tag_name),
'types': types,
'params': params})
__all__.append(class_name)
make_projection_types()
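# --- Illustration (hypothetical names, not part of the real registry) ---
# make_projection_types() above builds one GenericProjectionType subclass per
# registry entry via type(). The same pattern in miniature, using a plain
# base class so the real asdf type registry is left untouched:
def _dynamic_class_example():
    class _Base:
        pass
    demo_cls = type(
        str('DemoType'),
        (_Base,),
        {'name': 'transform/demo',
         'params': (('mu', 0.0),)})
    assert demo_cls.name == 'transform/demo'
    assert demo_cls.params == (('mu', 0.0),)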
|
0685aefea131c460b29a408bc32851eac8403e3aa85c6b7a99df09fd52d49e44 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
from astropy import modeling
from .basic import TransformType
__all__ = ['ShiftType', 'ScaleType', 'PolynomialType']
class ShiftType(TransformType):
name = "transform/shift"
types = ['astropy.modeling.models.Shift']
@classmethod
def from_tree_transform(cls, node, ctx):
offset = node['offset']
if not np.isscalar(offset):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Shift transform.")
return modeling.models.Shift(offset)
@classmethod
def to_tree_transform(cls, model, ctx):
        return {'offset': model.offset.value}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Shift) and
isinstance(b, modeling.models.Shift))
assert_array_equal(a.offset.value, b.offset.value)
class ScaleType(TransformType):
name = "transform/scale"
types = ['astropy.modeling.models.Scale']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
if not np.isscalar(factor):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Scale transform.")
return modeling.models.Scale(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'factor': model.factor.value}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Scale) and
isinstance(b, modeling.models.Scale))
assert_array_equal(a.factor, b.factor)
class PolynomialType(TransformType):
name = "transform/polynomial"
types = ['astropy.modeling.models.Polynomial1D',
'astropy.modeling.models.Polynomial2D']
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
if n_dim == 1:
model = modeling.models.Polynomial1D(coefficients.size - 1)
model.parameters = coefficients
elif n_dim == 2:
shape = coefficients.shape
degree = shape[0] - 1
if shape[0] != shape[1]:
raise TypeError("Coefficients must be an (n+1, n+1) matrix")
coeffs = {}
for i in range(shape[0]):
for j in range(shape[0]):
if i + j < degree + 1:
                        name = 'c{0}_{1}'.format(i, j)
coeffs[name] = coefficients[i, j]
model = modeling.models.Polynomial2D(degree, **coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transform.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.models.Polynomial1D):
coefficients = np.array(model.parameters)
elif isinstance(model, modeling.models.Polynomial2D):
degree = model.degree
coefficients = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
for j in range(degree + 1):
if i + j < degree + 1:
                        name = 'c{0}_{1}'.format(i, j)
coefficients[i, j] = getattr(model, name).value
node = {'coefficients': coefficients}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and
isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)))
assert_array_equal(a.parameters, b.parameters)
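# --- Usage sketch (illustrative only, not part of the original module) ---
# The 2D coefficient packing used by PolynomialType above: parameters c{i}_{j}
# with i + j <= degree fill the upper-left triangle of a
# (degree + 1, degree + 1) matrix; the remaining entries stay zero.
def _poly2d_packing_example():
    model = modeling.models.Polynomial2D(
        2, c0_0=1, c1_0=2, c0_1=3, c2_0=4, c1_1=5, c0_2=6)
    degree = model.degree
    coefficients = np.zeros((degree + 1, degree + 1))
    for i in range(degree + 1):
        for j in range(degree + 1):
            if i + j < degree + 1:
                coefficients[i, j] = getattr(
                    model, 'c{0}_{1}'.format(i, j)).value
    assert coefficients[0, 0] == 1 and coefficients[2, 0] == 4
    assert coefficients[2, 1] == 0 and coefficients[2, 2] == 0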
|
d0e8b30edea3cb4f760d327f84e6ed8ea58c10d521488dabfb55c14b324cbdce | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
from astropy import modeling
from .basic import TransformType
__all__ = ['TabularType']
class TabularType(TransformType):
name = "transform/tabular"
types = [
modeling.models.Tabular2D, modeling.models.Tabular1D
]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
name = node.get('name', None)
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node['points'][0][:],)
model = modeling.models.Tabular1D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value, name=name)
elif dim == 2:
points = tuple([p[:] for p in node['points']])
model = modeling.models.Tabular2D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value, name=name)
else:
tabular_class = modeling.models.tabular_model(dim, name)
points = tuple([p[:] for p in node['points']])
model = tabular_class(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value, name=name)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
node["name"] = model.name
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
        assert a.method == b.method
        if a.fill_value is None:
            assert b.fill_value is None
        elif np.isnan(a.fill_value):
            assert np.isnan(b.fill_value)
        else:
            assert a.fill_value == b.fill_value
        assert a.bounds_error == b.bounds_error
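# --- Usage sketch (illustrative only, not part of the original module) ---
# The node layout produced by to_tree_transform for a 1D table, assuming
# Tabular1D's defaults (method='linear', bounds_error=True).
def _tabular_example():
    model = modeling.models.Tabular1D(
        points=np.arange(5), lookup_table=[1., 10., 2., 45., -3.])
    node = {
        'lookup_table': model.lookup_table,
        'points': [p for p in model.points],
        'method': str(model.method),
        'bounds_error': model.bounds_error,
        'fill_value': model.fill_value,
        'name': model.name,
    }
    assert node['method'] == 'linear'
    assert node['bounds_error'] is True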
|
ec467a52b98d8f82248514a41f0216c92a43e28c966a4d9bfd68d7912bec8c11 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import tagged, yamlutil
from asdf.tests.helpers import assert_tree_match
from astropy import modeling
from astropy.modeling.models import Identity, Mapping
from .basic import TransformType, ConstantType
__all__ = ['CompoundType', 'RemapAxesType']
_operator_to_tag_mapping = {
'+' : 'add',
'-' : 'subtract',
'*' : 'multiply',
'/' : 'divide',
'**' : 'power',
'|' : 'compose',
'&' : 'concatenate'
}
_tag_to_method_mapping = {
'add' : '__add__',
'subtract' : '__sub__',
'multiply' : '__mul__',
'divide' : '__truediv__',
'power' : '__pow__',
'compose' : '__or__',
'concatenate' : '__and__'
}
class CompoundType(TransformType):
name = ['transform/' + x for x in _tag_to_method_mapping.keys()]
types = ['astropy.modeling.core._CompoundModel']
handle_dynamic_subclasses = True
@classmethod
def from_tree_tagged(cls, node, ctx):
tag = node._tag[node._tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
oper = _tag_to_method_mapping[tag]
left = yamlutil.tagged_tree_to_custom_tree(
node['forward'][0], ctx)
if not isinstance(left, modeling.Model):
raise TypeError("Unknown model type '{0}'".format(
node['forward'][0]._tag))
right = yamlutil.tagged_tree_to_custom_tree(
node['forward'][1], ctx)
if not isinstance(right, modeling.Model):
raise TypeError("Unknown model type '{0}'".format(
node['forward'][1]._tag))
model = getattr(left, oper)(right)
model = cls._from_tree_base_transform_members(model, node, ctx)
return model
@classmethod
def _to_tree_from_model_tree(cls, tree, ctx):
if tree.left.isleaf:
left = yamlutil.custom_tree_to_tagged_tree(
tree.left.value, ctx)
else:
left = cls._to_tree_from_model_tree(tree.left, ctx)
if tree.right.isleaf:
right = yamlutil.custom_tree_to_tagged_tree(
tree.right.value, ctx)
else:
right = cls._to_tree_from_model_tree(tree.right, ctx)
node = {
'forward': [left, right]
}
try:
tag_name = 'transform/' + _operator_to_tag_mapping[tree.value]
except KeyError:
raise ValueError("Unknown operator '{0}'".format(tree.value))
node = tagged.tag_object(cls.make_yaml_tag(tag_name), node, ctx=ctx)
return node
@classmethod
def to_tree_tagged(cls, model, ctx):
node = cls._to_tree_from_model_tree(model._tree, ctx)
cls._to_tree_base_transform_members(model, node, ctx)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert_tree_match(a._tree.left.value, b._tree.left.value)
assert_tree_match(a._tree.right.value, b._tree.right.value)
assert a._tree.value == b._tree.value
class RemapAxesType(TransformType):
name = 'transform/remap_axes'
types = ['astropy.modeling.models.Mapping']
@classmethod
def from_tree_transform(cls, node, ctx):
mapping = node['mapping']
n_inputs = node.get('n_inputs')
if all([isinstance(x, int) for x in mapping]):
return Mapping(tuple(mapping), n_inputs)
if n_inputs is None:
n_inputs = max([x for x in mapping
if isinstance(x, int)]) + 1
transform = Identity(n_inputs)
new_mapping = []
i = n_inputs
for entry in mapping:
if isinstance(entry, int):
new_mapping.append(entry)
else:
new_mapping.append(i)
transform = transform & ConstantType.from_tree(
{'value': int(entry.value)}, ctx)
i += 1
return transform | Mapping(new_mapping)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mapping': list(model.mapping)}
if model.n_inputs > max(model.mapping) + 1:
node['n_inputs'] = model.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.mapping == b.mapping
        assert a.n_inputs == b.n_inputs
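# --- Usage sketch (illustrative only, not part of the original module) ---
# How CompoundType rebuilds a compound model from a tag: the tag suffix
# (e.g. 'compose') selects a dunder method that is applied to the two
# deserialized sub-models.
def _compound_example():
    from astropy.modeling import models as astmodels
    left, right = astmodels.Shift(1), astmodels.Scale(2)
    compound = getattr(left, _tag_to_method_mapping['compose'])(right)
    assert compound(3.0) == (3.0 + 1) * 2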
|
dedd7c7e695bd28f17af07804e165779932b23cbb78a6a46fe813e060b50f86b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
from asdf.versioning import AsdfSpec
from astropy import time
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import EarthLocation
from ...types import AstropyAsdfType
_guessable_formats = set(['iso', 'byear', 'jyear', 'yday'])
_astropy_format_to_asdf_format = {
'isot': 'iso',
'byear_str': 'byear',
'jyear_str': 'jyear'
}
def _assert_earthlocation_equal(a, b):
assert_array_equal(a.x, b.x)
assert_array_equal(a.y, b.y)
assert_array_equal(a.z, b.z)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon, b.lon)
class TimeType(AstropyAsdfType):
name = 'time/time'
version = '1.1.0'
supported_versions = ['1.0.0', AsdfSpec('>=1.1.0')]
types = ['astropy.time.core.Time']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
format = node.format
if format == 'byear':
node = time.Time(node, format='byear_str')
elif format == 'jyear':
node = time.Time(node, format='jyear_str')
elif format in ('fits', 'datetime', 'plot_date'):
node = time.Time(node, format='isot')
format = node.format
format = _astropy_format_to_asdf_format.get(format, format)
guessable_format = format in _guessable_formats
if node.scale == 'utc' and guessable_format:
if node.isscalar:
return node.value
else:
return yamlutil.custom_tree_to_tagged_tree(
node.value, ctx)
d = {'value': yamlutil.custom_tree_to_tagged_tree(node.value, ctx)}
if not guessable_format:
d['format'] = format
if node.scale != 'utc':
d['scale'] = node.scale
if node.location is not None:
x, y, z = node.location.x, node.location.y, node.location.z
# Preserve backwards compatibility for writing the old schema
# This allows WCS to test backwards compatibility with old frames
# This code does get tested in CI, but we don't run a coverage test
if cls.version == '1.0.0': # pragma: no cover
unit = node.location.unit
d['location'] = { 'x': x, 'y': y, 'z': z, 'unit': unit }
else:
d['location'] = {
# It seems like EarthLocations can be represented either in
# terms of Cartesian coordinates or latitude and longitude, so
# we rather arbitrarily choose the former for our representation
'x': yamlutil.custom_tree_to_tagged_tree(x, ctx),
'y': yamlutil.custom_tree_to_tagged_tree(y, ctx),
'z': yamlutil.custom_tree_to_tagged_tree(z, ctx)
}
return d
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, (str, list, np.ndarray)):
t = time.Time(node)
format = _astropy_format_to_asdf_format.get(t.format, t.format)
if format not in _guessable_formats:
raise ValueError("Invalid time '{0}'".format(node))
return t
value = node['value']
format = node.get('format')
scale = node.get('scale')
location = node.get('location')
if location is not None:
unit = location.get('unit', u.m)
# This ensures that we can read the v.1.0.0 schema and convert it
# to the new EarthLocation object, which expects Quantity components
for comp in ['x', 'y', 'z']:
if not isinstance(location[comp], Quantity):
location[comp] = Quantity(location[comp], unit=unit)
location = EarthLocation.from_geocentric(
location['x'], location['y'], location['z'])
return time.Time(value, format=format, scale=scale, location=location)
@classmethod
def assert_equal(cls, old, new):
assert old.format == new.format
assert old.scale == new.scale
if isinstance(old.location, EarthLocation):
assert isinstance(new.location, EarthLocation)
_assert_earthlocation_equal(old.location, new.location)
else:
assert old.location == new.location
assert_array_equal(old, new)
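# --- Usage sketch (illustrative only, not part of the original module) ---
# The format normalization done at the top of to_tree: formats without a
# direct ASDF representation are first converted to a string-valued format.
def _time_format_example():
    t = time.Time(2000.0, format='byear')
    t_str = time.Time(t, format='byear_str')
    assert t_str.value.startswith('B2000')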
|
725c00785debe4afaea2993e0fed55dd70564dd0b9188201ba330591d32fab1f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.yamlutil import custom_tree_to_tagged_tree
from astropy.units import Quantity
from astropy.coordinates import ICRS, Longitude, Latitude, Angle
from astropy.tests.helper import assert_quantity_allclose
from ...types import AstropyType
from ..unit.quantity import QuantityType
__all__ = ['ICRSCoordType']
class ICRSCoordType(AstropyType):
name = "coordinates/frames/icrs"
types = ['astropy.coordinates.ICRS']
requires = ['astropy']
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
        wrap_angle = Angle(QuantityType.from_tree(node['ra']['wrap_angle'], ctx))
ra = Longitude(
node['ra']['value'],
unit=node['ra']['unit'],
wrap_angle=wrap_angle)
dec = Latitude(node['dec']['value'], unit=node['dec']['unit'])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node['ra'] = {
'value': frame.ra.value,
'unit': frame.ra.unit.to_string(),
'wrap_angle': custom_tree_to_tagged_tree(wrap_angle, ctx)
}
node['dec'] = {
'value': frame.dec.value,
'unit': frame.dec.unit.to_string()
}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert_quantity_allclose(new.ra, old.ra)
assert_quantity_allclose(new.dec, old.dec)
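# --- Usage sketch (illustrative only, not part of the original module) ---
# The plain-dict representation built in to_tree, round-tripped by hand
# (wrap_angle handling omitted for brevity).
def _icrs_node_example():
    frame = ICRS(ra=Longitude(25, unit='deg'), dec=Latitude(45, unit='deg'))
    node = {'ra': {'value': frame.ra.value,
                   'unit': frame.ra.unit.to_string()},
            'dec': {'value': frame.dec.value,
                    'unit': frame.dec.unit.to_string()}}
    rebuilt = ICRS(
        ra=Longitude(node['ra']['value'], unit=node['ra']['unit']),
        dec=Latitude(node['dec']['value'], unit=node['dec']['unit']))
    assert rebuilt.ra.deg == 25 and rebuilt.dec.deg == 45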
|
6e744e7db677958b276090004c80e3ef8662501aca366f04ee4ba91714eb9d39 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import six
from astropy.units import Unit, UnitBase
from ...types import AstropyAsdfType
class UnitType(AstropyAsdfType):
name = 'unit/unit'
types = ['astropy.units.UnitBase']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
if isinstance(node, six.string_types):
node = Unit(node, format='vounit', parse_strict='warn')
if isinstance(node, UnitBase):
return node.to_string(format='vounit')
raise TypeError("'{0}' is not a valid unit".format(node))
@classmethod
def from_tree(cls, node, ctx):
return Unit(node, format='vounit', parse_strict='silent')
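# --- Usage sketch (illustrative only, not part of the original module) ---
# The VOUnit string round-trip performed by to_tree/from_tree above.
def _unit_roundtrip_example():
    vounit_str = Unit('km / s').to_string(format='vounit')
    assert Unit(vounit_str, format='vounit',
                parse_strict='silent') == Unit('km / s')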
|
aec73618735b6d87ea4d933d9ec4276c54a7c1d433d43c86af1da3af9aaf3ea2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy import isscalar
from astropy.units import Quantity
from asdf.yamlutil import custom_tree_to_tagged_tree
from asdf.tags.core import NDArrayType
from ...types import AstropyAsdfType
from .unit import UnitType
class QuantityType(AstropyAsdfType):
name = 'unit/quantity'
types = ['astropy.units.Quantity']
requires = ['astropy']
version = '1.1.0'
@classmethod
def to_tree(cls, quantity, ctx):
node = {}
if isinstance(quantity, Quantity):
node['value'] = custom_tree_to_tagged_tree(quantity.value, ctx)
node['unit'] = custom_tree_to_tagged_tree(quantity.unit, ctx)
return node
raise TypeError("'{0}' is not a valid Quantity".format(quantity))
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, Quantity):
return node
unit = UnitType.from_tree(node['unit'], ctx)
value = node['value']
if isinstance(value, NDArrayType):
value = value._make_array()
return Quantity(value, unit=unit)
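# --- Usage sketch (illustrative only, not part of the original module) ---
# The value/unit pair stored for a Quantity, built and consumed here by hand
# without an asdf context for simplicity.
def _quantity_node_example():
    q = Quantity([1.0, 2.0], unit='m')
    node = {'value': q.value, 'unit': q.unit.to_string()}
    rebuilt = Quantity(node['value'], unit=node['unit'])
    assert (rebuilt == q).all()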
|
7c101e32c6c2ae927b3e07cefc6c257bca273b35a0e4303a42a2279372f542e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from astropy import table
asdf = pytest.importorskip('asdf')
from asdf.tests import helpers
def test_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array([([[1, 2], [3, 4]], 2.0, 'x'),
([[5, 6], [7, 8]], 5.0, 'y'),
([[9, 10], [11, 12]], 8.2, 'z')],
dtype=[(str('a'), str('<i4'), (2, 2)),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
assert t.columns['a'].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array([((1, 'a'), 2.0, 'x'),
((4, 'b'), 5.0, 'y'),
((5, 'c'), 8.2, 'z')],
dtype=[(str('a'), [(str('a0'), str('<i4')),
(str('a1'), str('|S1'))]),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array([(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')],
dtype=[(str('a'), str('<i4')),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
write_options={'auto_inline': 64})
def test_mismatched_columns():
yaml = """
table: !core/table
columns:
- !core/column
data: !core/ndarray
data: [0, 1, 2]
name: a
- !core/column
data: !core/ndarray
data: [0, 1, 2, 3]
name: b
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError):
with asdf.AsdfFile.open(buff) as ff:
pass
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'), masked=True)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['a'].mask = [True, False, True]
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
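# Note on the block counts asserted in these tests (a reading of the
# assertions above, not stated in the source): a table built from three
# separate columns apparently stores one asdf block per column (3 blocks),
# masking adds one more block (4), and a table wrapping a single structured
# ndarray shares one block (1).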
|
61df4ba1ddac29cafb265270a2a82b216c69981a12c830307f49c4a5f1cf87fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from astropy.io import fits
asdf = pytest.importorskip('asdf')
from asdf.tests import helpers
def test_complex_structure(tmpdir):
with fits.open(os.path.join(
os.path.dirname(__file__), 'data', 'complex.fits'), memmap=False) as hdulist:
tree = {
'fits': hdulist
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array(
[(0, 1), (2, 3)],
dtype=[(str('A'), int), (str('B'), int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {'fits': h}
def check_yaml(content):
assert b'!core/table' in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
|
16f9cbceb2d723550fd190bca0e54df04c71c0f827501ff362b98a185a4baccd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
asdf = pytest.importorskip('asdf')
from asdf import util
from asdf.tests import helpers
from astropy.modeling import models as astmodels
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3), astmodels.Shift(2.),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3)
]
def test_transforms_compound(tmpdir):
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
'astropy.modeling.projections.Sky2Pix_{0}'.format(name))(),
'backward': util.resolve_name(
'astropy.modeling.projections.Pix2Sky_{0}'.format(name))()
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[ 3., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
e7bc62a8ee47cfee2ee0b0897036ee6c90adbc81df3710134423282278f1faeb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import datetime
from collections import OrderedDict
import pytest
import numpy as np
from astropy import time
asdf = pytest.importorskip('asdf')
from asdf import AsdfFile, yamlutil, tagged
from asdf.tests import helpers
import asdf.schema as asdf_schema
def _flatten_combiners(schema):
newschema = OrderedDict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault('items', [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == 'items':
cursor = cursor.setdefault('items', OrderedDict())
else:
cursor = cursor.setdefault('properties', OrderedDict())
if i < len(path) - 1 and isinstance(path[i+1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, OrderedDict())
cursor.update(schema)
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1,2]*u.m, y=[3,4]*u.m, z=[5,6]*u.m)
t = time.Time([1,2], location=location, format='cxcsec')
tree = {'time': t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_isot(tmpdir):
tree = {
'time': time.Time('2000-01-01T00:00:00.000')
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
assert isinstance(tree['time'], str)
def test_time_tag():
schema = asdf_schema.load_schema(
'http://stsci.edu/schemas/asdf/time/time-1.1.0',
resolve_references=True)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
tag = 'tag:stsci.edu:asdf/time/time-1.1.0'
date = tagged.tag_object(tag, date)
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
|
de53b29a0d286e64a8c04a9ee528395821e0b131e533ca77380550c262ddc303 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import ICRS, Longitude, Latitude, Angle
from ....extension import AstropyExtension
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
@pytest.mark.xfail(
reason="Compound ICRS coordinates have not been implemented for ASDF yet")
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2]*units.deg, dec=[3, 4, 5]*units.deg)
tree = {'coord': icrs}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
|
e209b056b99dc99505100bc05c4d0c72c42d17451e939737aec0d7bfa3878225 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units as u
asdf = pytest.importorskip('asdf')
from asdf.tests import helpers
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
|
f4773a25ec66531ae5c5b8eae807c9ba4dfbc3f1b77e4ae18f7cac7fd72218de | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units
asdf = pytest.importorskip('asdf')
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = """
quantity: !unit/quantity-1.1.0
value: {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1,2,3],[4,5,6]]
testunit = units.km
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{}
unit: {}
""".format(testval, testunit)
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
233187cc80159984b7bcbac77019720b0699f016eca0c5ceb4fb5d9220411e6a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from ....tests.helper import catch_warnings
from .. import converters
from .. import exceptions
from .. import tree
def test_reraise():
def fail():
raise RuntimeError("This failed")
try:
try:
fail()
except RuntimeError as e:
exceptions.vo_reraise(e, additional="From here")
except RuntimeError as e:
assert "From here" in str(e)
else:
assert False
def test_parse_vowarning():
config = {'pedantic': True,
'filename': 'foo.xml'}
pos = (42, 64)
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config, pos=pos)
c = converters.get_converter(field, config=config, pos=pos)
parts = exceptions.parse_vowarning(str(w[0].message))
match = {
'number': 47,
'is_exception': False,
'nchar': 64,
'warning': 'W47',
'is_something': True,
'message': 'Missing arraysize indicates length 1',
'doc_url': 'io/votable/api_exceptions.html#w47',
'nline': 42,
'is_warning': True
}
assert parts == match
|
64ad329f4909ad55213f87c7226eb50051e179cb946079c0f1c0d30939024b7b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A set of tests for the util.py module
"""
# LOCAL
from .. import util
from ....tests.helper import raises
def test_range_list():
assert util.coerce_range_list_param((5,)) == ("5.0", 1)
def test_range_list2():
assert util.coerce_range_list_param((5e-7, 8e-7)) == ("5e-07,8e-07", 2)
def test_range_list3():
assert util.coerce_range_list_param((5e-7, 8e-7, "FOO")) == (
"5e-07,8e-07;FOO", 3)
@raises(ValueError)
def test_range_list4a():
util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"))
def test_range_list4():
assert (util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"), numeric=False) ==
("5e-07,/8e-07,4/,4/5,J;FOO", 6))
@raises(ValueError)
def test_range_list5():
util.coerce_range_list_param(('FOO', ))
@raises(ValueError)
def test_range_list6():
print(util.coerce_range_list_param((5, 'FOO'), util.stc_reference_frames))
def test_range_list7():
assert util.coerce_range_list_param(("J",), numeric=False) == ("J", 1)
def test_range_list8():
for s in ["5.0",
"5e-07,8e-07",
"5e-07,8e-07;FOO",
"5e-07,/8e-07,4.0/,4.0/5.0;FOO",
"J"]:
assert util.coerce_range_list_param(s, numeric=False)[0] == s
@raises(ValueError)
def test_range_list9a():
util.coerce_range_list_param("52,-27.8;FOO", util.stc_reference_frames)
def test_range_list9():
assert util.coerce_range_list_param(
"52,-27.8;GALACTIC", util.stc_reference_frames)
|
3f5cd5ae585b2f68a617ebe9899458972ad4b8f0d1d9f4e9488f032340b4313d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from .. import parse
from ....utils.data import get_pkg_data_filename
def test_resource_groups():
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/resource_groups.xml'))
resource = votable.resources[0]
groups = resource.groups
params = resource.params
# Test that params inside groups are not outside
assert len(groups[0].entries) == 1
assert groups[0].entries[0].name == "ID"
assert len(params) == 2
assert params[0].name == "standardID"
assert params[1].name == "accessURL"
|
da49133324058d1f2d6e537a179b25af95954936336ffd312bb2d6c783e49b67 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import io
import pathlib
import sys
import gzip
# THIRD-PARTY
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from ..table import parse, parse_single_table, validate
from .. import tree
from ..exceptions import VOTableSpecError, VOWarning
from ..xmlutil import validate_schema
from ....utils.data import get_pkg_data_filename, get_pkg_data_filenames
from ....tests.helper import raises, catch_warnings
# Determine the kind of float formatting in this build of Python
if hasattr(sys, 'float_repr_style'):
legacy_float_repr = (sys.float_repr_style == 'legacy')
else:
legacy_float_repr = sys.platform.startswith('win')
def assert_validate_schema(filename, version):
if sys.platform.startswith('win'):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, 'File did not validate against VOTable schema'
def test_parse_single_table():
table = parse_single_table(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
table2 = parse_single_table(
get_pkg_data_filename('data/regression.xml'),
table_number=1,
pedantic=False)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
@raises(IndexError)
def test_parse_single_table3():
parse_single_table(
get_pkg_data_filename('data/regression.xml'),
table_number=3, pedantic=False)
def _test_regression(tmpdir, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False,
_debug_python_based_parser=_python_based)
table = votable.get_first_table()
dtypes = [
((str('string test'), str('string_test')), str('|O8')),
((str('fixed string test'), str('string_test_2')), str('|S10')),
(str('unicode_test'), str('|O8')),
((str('unicode test'), str('fixed_unicode_test')), str('<U10')),
((str('string array test'), str('string_array_test')), str('|S4')),
(str('unsignedByte'), str('|u1')),
(str('short'), str('<i2')),
(str('int'), str('<i4')),
(str('long'), str('<i8')),
(str('double'), str('<f8')),
(str('float'), str('<f4')),
(str('array'), str('|O8')),
(str('bit'), str('|b1')),
(str('bitarray'), str('|b1'), (3, 2)),
(str('bitvararray'), str('|O8')),
(str('bitvararray2'), str('|O8')),
(str('floatComplex'), str('<c8')),
(str('doubleComplex'), str('<c16')),
(str('doubleComplexArray'), str('|O8')),
(str('doubleComplexArrayFixed'), str('<c16'), (2,)),
(str('boolean'), str('|b1')),
(str('booleanArray'), str('|b1'), (4,)),
(str('nulls'), str('<i4')),
(str('nulls_array'), str('<i4'), (2, 2)),
(str('precision1'), str('<f8')),
(str('precision2'), str('<f8')),
(str('doublearray'), str('|O8')),
(str('bitarray2'), str('|b1'), (16,))
]
if sys.byteorder == 'big':
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace(str('<'), str('>'))
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(str(tmpdir.join("regression.tabledata.xml")),
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.tabledata.xml")),
votable.version)
if binary_mode == 1:
votable.get_first_table().format = 'binary'
votable.version = '1.1'
elif binary_mode == 2:
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
votable.version = '1.3'
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.binary.xml")),
votable.version)
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "rb") as fd:
votable2 = parse(fd, pedantic=False,
_debug_python_based_parser=_python_based)
votable2.get_first_table().format = 'tabledata'
votable2.to_xml(str(tmpdir.join("regression.bin.tabledata.xml")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.bin.tabledata.xml")),
votable.version)
with open(
get_pkg_data_filename(
'data/regression.bin.tabledata.truth.{0}.xml'.format(
votable.version)),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
with open(str(tmpdir.join("regression.bin.tabledata.xml")),
'rt', encoding='utf-8') as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmpdir.join("regression.bin.tabledata.xml.gz")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
with gzip.GzipFile(
str(tmpdir.join("regression.bin.tabledata.xml.gz")), 'rb') as gzfd:
output = gzfd.readlines()
output = [x.decode('utf-8').rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression(tmpdir):
_test_regression(tmpdir, False)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_python_based_parser(tmpdir):
_test_regression(tmpdir, True)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_binary2(tmpdir):
_test_regression(tmpdir, False, 2)
class TestFixups:
def setup_class(self):
self.table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array['string_test_2'],
self.array['fixed string test'])
class TestReferences:
def setup_class(self):
self.votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == 'boolean'
assert fieldref.get_ref().datatype == 'boolean'
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == 'INPUT'
assert paramref.get_ref().datatype == 'float'
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False, columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
columns = ['string_test', 'unsignedByte', 'bitarray']
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
def test_select_columns_by_name():
columns = ['string_test', 'unsignedByte', 'bitarray']
table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False, columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
class TestParse:
def setup_class(self):
self.votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array['string_test'].dtype.type,
np.object_)
assert_array_equal(
self.array['string_test'],
[b'String & test', b'String & test', b'XXXX',
b'', b''])
def test_fixed_string_test(self):
assert issubclass(self.array['string_test_2'].dtype.type,
np.string_)
assert_array_equal(
self.array['string_test_2'],
[b'Fixed stri', b'0123456789', b'XXXX', b'', b''])
def test_unicode_test(self):
assert issubclass(self.array['unicode_test'].dtype.type,
np.object_)
assert_array_equal(self.array['unicode_test'],
["Ceçi n'est pas un pipe",
'வணக்கம்',
'XXXX', '', ''])
def test_fixed_unicode_test(self):
assert issubclass(self.array['fixed_unicode_test'].dtype.type,
np.unicode_)
assert_array_equal(self.array['fixed_unicode_test'],
["Ceçi n'est",
'வணக்கம்',
'0123456789', '', ''])
def test_unsignedByte(self):
assert issubclass(self.array['unsignedByte'].dtype.type,
np.uint8)
assert_array_equal(self.array['unsignedByte'],
[128, 255, 0, 255, 255])
assert not np.any(self.mask['unsignedByte'])
def test_short(self):
assert issubclass(self.array['short'].dtype.type,
np.int16)
assert_array_equal(self.array['short'],
[4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask['short'])
def test_int(self):
assert issubclass(self.array['int'].dtype.type,
np.int32)
assert_array_equal(
self.array['int'],
[268435456, 2147483647, -268435456, 268435455, 123456789])
assert_array_equal(self.mask['int'],
[False, False, False, False, True])
def test_long(self):
assert issubclass(self.array['long'].dtype.type,
np.int64)
assert_array_equal(
self.array['long'],
[922337203685477, 123456789, -1152921504606846976,
1152921504606846975, 123456789])
assert_array_equal(self.mask['long'],
[False, True, False, False, True])
def test_double(self):
assert issubclass(self.array['double'].dtype.type,
np.float64)
assert_array_equal(self.array['double'],
[8.9990234375, 0.0, np.inf, np.nan, -np.inf])
assert_array_equal(self.mask['double'],
[False, False, False, True, False])
def test_float(self):
assert issubclass(self.array['float'].dtype.type,
np.float32)
assert_array_equal(self.array['float'],
[1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask['float'],
[False, False, False, False, True])
def test_array(self):
assert issubclass(self.array['array'].dtype.type,
np.object_)
match = [[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]]]
for a, b in zip(self.array['array'], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data['array'][3].mask[0][0]
assert self.array.data['array'][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array['bit'].dtype.type,
np.bool_)
assert_array_equal(self.array['bit'],
[True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array['bitarray'].dtype.type,
np.bool_)
assert self.array['bitarray'].shape == (5, 3, 2)
assert_array_equal(self.array['bitarray'],
[[[True, False],
[True, True],
[False, True]],
[[False, True],
[False, False],
[True, True]],
[[True, True],
[True, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]]])
def test_bitarray_mask(self):
assert_array_equal(self.mask['bitarray'],
[[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[True, True],
[True, True],
[True, True]],
[[True, True],
[True, True],
[True, True]]])
def test_bitvararray(self):
assert issubclass(self.array['bitvararray'].dtype.type,
np.object_)
match = [[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[], []]
for a, b in zip(self.array['bitvararray'], match):
assert_array_equal(a, b)
match_mask = [[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False, False]
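        # The final two rows are empty variable-length arrays, so their
        # masks compare equal to scalar False.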
for a, b in zip(self.array['bitvararray'], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array['bitvararray2'].dtype.type,
np.object_)
match = [[],
[[[False, True],
[False, False],
[True, False]],
[[True, False],
[True, False],
[True, False]]],
[[[True, True],
[True, True],
[True, True]]],
[],
[]]
for a, b in zip(self.array['bitvararray2'], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array['floatComplex'].dtype.type,
np.complex64)
assert_array_equal(self.array['floatComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])
assert_array_equal(self.mask['floatComplex'],
[True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array['doubleComplex'].dtype.type,
np.complex128)
assert_array_equal(
self.array['doubleComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])
assert_array_equal(self.mask['doubleComplex'],
[True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array['doubleComplexArray'].dtype.type,
np.object_)
assert ([len(x) for x in self.array['doubleComplexArray']] ==
[0, 2, 2, 0, 0])
def test_boolean(self):
assert issubclass(self.array['boolean'].dtype.type,
np.bool_)
assert_array_equal(self.array['boolean'],
[True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask['boolean'],
[False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array['booleanArray'].dtype.type,
np.bool_)
assert_array_equal(self.array['booleanArray'],
[[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False]])
def test_boolean_array_mask(self):
assert_array_equal(self.mask['booleanArray'],
[[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True]])
def test_nulls(self):
assert_array_equal(self.array['nulls'],
[0, -9, 2, -9, -9])
assert_array_equal(self.mask['nulls'],
[False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(self.array['nulls_array'],
[[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]]])
assert_array_equal(self.mask['nulls_array'],
[[[True, True],
[True, True]],
[[False, False],
[False, False]],
[[True, False],
[True, False]],
[[False, True],
[False, True]],
[[True, True],
[True, True]]])
def test_double_array(self):
assert issubclass(self.array['doublearray'].dtype.type,
np.object_)
assert len(self.array['doublearray'][0]) == 0
assert_array_equal(self.array['doublearray'][1],
[0, 1, np.inf, -np.inf, np.nan, 0, -1])
assert_array_equal(self.array.data['doublearray'][1].mask,
[False, False, False, False, False, False, True])
def test_bit_array2(self):
assert_array_equal(self.array['bitarray2'][0],
[True, True, True, True,
False, False, False, False,
True, True, True, True,
False, False, False, False])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'][0])
assert np.all(self.mask['bitarray2'][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id('J2000')
assert coosys.system == 'eq_FK5'
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id('QUERY_STATUS')
assert info.value == 'OK'
if self.votable.version != '1.1':
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..." # noqa
def test_repr(self):
assert '3 tables' in repr(self.votable)
assert repr(list(self.votable.iter_fields_and_params())[0]) == \
'<PARAM ID="awesome" arraysize="*" datatype="float" name="INPUT" unit="deg" value="[0.0 0.0]"/>' # noqa
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == '[</>]'
class TestThroughTableData(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
def test_schema(self, tmpdir):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = str(tmpdir.join("test_through_tabledata.xml"))
with open(fn, 'wb') as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, '1.1')
class TestThroughBinary(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
votable.get_first_table().format = 'binary'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask['bit'])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
class TestThroughBinary2(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
votable.version = '1.3'
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from ..tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
def test_open_files():
for filename in get_pkg_data_filenames('data', pattern='*.xml'):
if filename.endswith('custom_datatype.xml'):
continue
parse(filename, pedantic=False)
@raises(VOTableSpecError)
def test_too_many_columns():
parse(
get_pkg_data_filename('data/too_many_columns.xml.gz'),
pedantic=False)
def test_build_from_scratch(tmpdir):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
tree.Field(votable, ID="filename", datatype="char"),
tree.Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmpdir.join("new_votable.xml")))
votable = parse(str(tmpdir.join("new_votable.xml")))
table = votable.get_first_table()
assert_array_equal(
table.array.mask, np.array([(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]])],
dtype=[(str('filename'), str('?')),
(str('matrix'), str('?'), (2, 2))]))
def test_validate(test_path_object=False):
"""
test_path_object is needed for test below ``test_validate_path_object``
so that file could be passed as pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename('data/regression.xml')
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
with catch_warnings():
result = validate(fpath,
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/validation.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_validate_path_object():
"""
    Validate when the source is passed as a ``pathlib.Path`` object (#4412).
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmpdir):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
with open(str(tmpdir.join("regression.compressed.xml")), 'wb') as fd:
votable.to_xml(
fd,
compressed=True,
_astropy_version="testing")
with open(str(tmpdir.join("regression.compressed.xml")), 'rb') as fd:
votable = parse(
fd,
pedantic=False)
def test_from_scratch_example():
with catch_warnings(VOWarning) as warning_lines:
try:
_run_test_from_scratch_example()
except ValueError as e:
warning_lines.append(str(e))
assert len(warning_lines) == 0
def _run_test_from_scratch_example():
from ..tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == 'test1.xml'
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from ....utils.xml import iterparser
filename = get_pkg_data_filename('data/regression.xml')
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == 'win32':
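            # On Windows a callable read function is returned instead of a
            # raw file object (platform-specific behavior of the helper).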
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from .... import units as u
votable = parse(
get_pkg_data_filename('data/nonstandard_units.xml'),
pedantic=False)
assert isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename('data/nonstandard_units.xml'),
pedantic=False,
unit_format='generic')
assert not isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = 't2'
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
    for res in vtf2.resources:
        assert len(res.tables) == 2
        assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
with catch_warnings():
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(get_pkg_data_filename('data/no_resource.xml'),
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/no_resource.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename('data/custom_datatype.xml'),
pedantic=False,
datatype_mapping={'bar': 'int'}
)
table = votable.get_first_table()
assert table.array.dtype['foo'] == np.int32
|
e1655b596e48ae5db7c83ddca5c8edbce35a27583806a938ae9315eea91841de | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from ....tests.helper import raises
# LOCAL
from .. import ucd
def test_none():
assert ucd.check_ucd(None)
examples = {
'phys.temperature':
[('ivoa', 'phys.temperature')],
'pos.eq.ra;meta.main':
[('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')],
'meta.id;src':
[('ivoa', 'meta.id'), ('ivoa', 'src')],
'phot.flux;em.radio;arith.ratio':
[('ivoa', 'phot.flux'), ('ivoa', 'em.radio'), ('ivoa', 'arith.ratio')],
'PHot.Flux;EM.Radio;ivoa:arith.Ratio':
[('ivoa', 'phot.flux'), ('ivoa', 'em.radio'), ('ivoa', 'arith.ratio')],
'pos.galactic.lat':
[('ivoa', 'pos.galactic.lat')],
'meta.code;phot.mag':
[('ivoa', 'meta.code'), ('ivoa', 'phot.mag')],
'stat.error;phot.mag':
[('ivoa', 'stat.error'), ('ivoa', 'phot.mag')],
'phys.temperature;instr;stat.max':
[('ivoa', 'phys.temperature'), ('ivoa', 'instr'),
('ivoa', 'stat.max')],
'stat.error;phot.mag;em.opt.V':
[('ivoa', 'stat.error'), ('ivoa', 'phot.mag'), ('ivoa', 'em.opt.V')],
}
def test_check():
for s, p in examples.items():
assert ucd.parse_ucd(s, True, True) == p
assert ucd.check_ucd(s, True, True)
@raises(ValueError)
def test_too_many_colons():
ucd.parse_ucd("ivoa:stsci:phot", True, True)
@raises(ValueError)
def test_invalid_namespace():
ucd.parse_ucd("_ivoa:phot.mag", True, True)
@raises(ValueError)
def test_invalid_word():
ucd.parse_ucd("-pho")
|
9d7e5969aeb091104adf2ce335d8dd3a01515101752d5666493a5deefd53232a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import numpy as np
from ....utils.data import get_pkg_data_filename, get_pkg_data_fileobj
from ..table import parse, writeto
from .. import tree
def test_table(tmpdir):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
field_types = [
('string_test', {'datatype': 'char', 'arraysize': '*'}),
('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
('unsignedByte', {'datatype': 'unsignedByte'}),
('short', {'datatype': 'short'}),
('int', {'datatype': 'int'}),
('long', {'datatype': 'long'}),
('double', {'datatype': 'double'}),
('float', {'datatype': 'float'}),
('array', {'datatype': 'long', 'arraysize': '2*'}),
('bit', {'datatype': 'bit'}),
('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
('floatComplex', {'datatype': 'floatComplex'}),
('doubleComplex', {'datatype': 'doubleComplex'}),
('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
('boolean', {'datatype': 'bit'}),
('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
('nulls', {'datatype': 'int'}),
('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
('precision1', {'datatype': 'double'}),
('precision2', {'datatype': 'double'}),
('doublearray', {'datatype': 'double', 'arraysize': '*'}),
('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]
for field, type in zip(t.fields, field_types):
name, d = type
assert field.ID == name
assert field.datatype == d['datatype']
if 'arraysize' in d:
assert field.arraysize == d['arraysize']
writeto(votable2, os.path.join(str(tmpdir), "through_table.xml"))
def test_read_through_table_interface(tmpdir):
from ....table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='main_table')
assert len(t) == 5
fn = os.path.join(str(tmpdir), "table_interface.xml")
t.write(fn, table_id='FOO', format='votable')
with open(fn, 'rb') as fd:
t2 = Table.read(fd, format='votable', table_id='FOO')
assert len(t2) == 5
def test_read_through_table_interface2():
from ....table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='last_table')
assert len(t) == 0
def test_names_over_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
'Name', 'GLON', 'GLAT', 'RAdeg', 'DEdeg', 'Jmag', 'Hmag', 'Kmag',
'G3.6mag', 'G4.5mag', 'G5.8mag', 'G8.0mag', '4.5mag', '8.0mag',
'Emag', '24mag', 'f_Name']
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
from ....table import Table
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable')
assert len(t) == 1
def test_votable_path_object():
"""
    Test that parsing works when the VOTable is passed as a
    ``pathlib.Path`` object (#4412).
"""
fpath = pathlib.Path(get_pkg_data_filename('data/names.xml'))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
from ....table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable')
def test_write_with_format():
from ....table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY' in obuff
assert b'TABLEDATA' not in obuff
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY2' in obuff
assert b'TABLEDATA' not in obuff
def test_empty_table():
votable = parse(
get_pkg_data_filename('data/empty_table.xml'),
pedantic=False)
table = votable.get_first_table()
astropy_table = table.to_table()
|
acedeaf6a6ffa484ac299efd1ab8e1d90917cbc795ec56091d2cd5dbadfd8ab1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from .. import exceptions
from .. import tree
from ....tests.helper import raises
@raises(exceptions.W07)
def test_check_astroyear_fail():
config = {'pedantic': True}
field = tree.Field(None, name='astroyear')
tree.check_astroyear('X2100', field, config)
@raises(exceptions.W08)
def test_string_fail():
config = {'pedantic': True}
tree.check_string(42, 'foo', config)
def test_make_Fields():
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
table.fields.extend([tree.Field(votable, name='Test', datatype="float", unit="mag")])
|
bcd937c78c333b9a8820d7564f4fd69b899243ef87ff6710e013bc0d530ca66d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
# THIRD-PARTY
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from .. import converters
from .. import exceptions
from .. import tree
from ..table import parse_single_table
from ....tests.helper import raises, catch_warnings
from ....utils.data import get_pkg_data_filename
@raises(exceptions.E13)
def test_invalid_arraysize():
field = tree.Field(
None, name='broken', datatype='char', arraysize='foo')
converters.get_converter(field)
def test_oversize_char():
config = {'pedantic': True}
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert len(w) == 1
with catch_warnings(exceptions.W46) as w:
c.parse("XXX")
assert len(w) == 1
def test_char_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
def test_oversize_unicode():
config = {'pedantic': True}
with catch_warnings(exceptions.W46) as w:
field = tree.Field(
None, name='c2', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
c.parse("XXX")
assert len(w) == 1
def test_unicode_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
@raises(exceptions.E02)
def test_wrong_number_of_elements():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='int', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
@raises(ValueError)
def test_float_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('') == (c.null, True)
c.parse('null')
def test_float_mask_permissive():
config = {'pedantic': False}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('null') == (c.null, True)
@raises(exceptions.E02)
def test_complex_array_vararray():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
def test_complex_array_vararray2():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("")
assert len(x[0]) == 0
def test_complex_array_vararray3():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12")
assert len(x) == 2
assert np.all(x[0][0][0] == complex(1, 2))
def test_complex_vararray():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4")
assert len(x) == 2
assert x[0][0] == complex(1, 2)
@raises(exceptions.E03)
def test_complex():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3")
@raises(exceptions.E04)
def test_bit():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("T")
def test_bit_mask():
config = {'pedantic': True}
with catch_warnings(exceptions.W39) as w:
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
c.output(True, True)
assert len(w) == 1
@raises(exceptions.E05)
def test_boolean():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='boolean',
config=config)
c = converters.get_converter(field, config=config)
c.parse('YES')
def test_boolean_array():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='boolean', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
r, mask = c.parse('TRUE FALSE T F 0 1')
assert_array_equal(r, [True, False, True, False, False, True])
@raises(exceptions.E06)
def test_invalid_type():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='foobar',
config=config)
c = converters.get_converter(field, config=config)
def test_precision():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float', precision="E4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2'
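    # 'E4' above requests four significant figures; 'F4' below requests
    # four digits after the decimal point.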
field = tree.Field(
None, name='c', datatype='float', precision="F4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2480'
@raises(exceptions.W51)
def test_integer_overflow():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='int', config=config)
c = converters.get_converter(field, config=config)
c.parse('-2208988800', config=config)
def test_float_default_precision():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float', arraysize="4",
config=config)
c = converters.get_converter(field, config=config)
assert (c.output([1, 2, 3, 8.9990234375], [False, False, False, False]) ==
'1 2 3 8.9990234375')
def test_vararray():
votable = tree.VOTableFile()
resource = tree.Resource()
votable.resources.append(resource)
table = tree.Table(votable)
resource.tables.append(table)
tabarr = []
heads = ['headA', 'headB', 'headC']
types = ["char", "double", "int"]
vals = [["A", 1.0, 2],
["B", 2.0, 3],
["C", 3.0, 4]]
for i in range(len(heads)):
tabarr.append(tree.Field(
votable, name=heads[i], datatype=types[i], arraysize="*"))
table.fields.extend(tabarr)
table.create_arrays(len(vals))
for i in range(len(vals)):
values = tuple(vals[i])
table.array[i] = values
buff = io.BytesIO()
votable.to_xml(buff)
def test_gemini_v1_2():
'''
    See Pull Request #4782 or Issue #4781 for details.
'''
table = parse_single_table(get_pkg_data_filename('data/gemini.xml'))
assert table is not None
|
86391efb1dae36e6673d96b2c8695aa5d4e74a06e79fed1feafae94b95d39de1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
import pickle
import urllib.request
import urllib.error
import http.client
# VO
from .. import table
from .. import exceptions
from .. import xmlutil
class Result:
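    """
    Caches and records the validation result for a single VOTable URL.

    Downloaded XML and computed attributes are stored under ``root`` in a
    directory derived from the MD5 hash of the URL.
    """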
def __init__(self, url, root='results', timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
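        # e.g. a hex digest 'd41d8cd9...' maps to the nested directory
        # 'd4/1d/8cd9...', keeping any single directory small.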
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
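    # Usage sketch: the context manager persists attributes on exit, e.g.
    #
    #     with Result(url, root=destdir) as r:
    #         r['expected'] = 'good'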
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write('FAILED: {0}\n'.format(reason).encode('utf-8'))
self['network_error'] = reason
r = None
try:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail("HTTPException: {}".format(str(e)))
return
except (socket.timeout, socket.error) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, 'rb') as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, pedantic=False, filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
            # OSError is raised when the XML file exhausts all memory and
            # the system sends a kill signal.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
"java -jar {} votlint validate=false {}".format(
path_to_stilts_jar, filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
'{}: {}'.format(warning_code, warning_descr),
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
'{}: {}'.format(exception_code, exception_descr),
exc, ['ul', 'li']))
return tables
|
b057d2b84d531d713b1fd6f884edb4b8a1034818c62d7e9f930cbf7d406e0b80 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Validates a large collection of web-accessible VOTable files,
and generates a report as a directory tree of HTML files.
"""
# STDLIB
import os
# LOCAL
from ....utils.data import get_pkg_data_filename
from . import html
from . import result
__all__ = ['make_validation_report']
def get_srcdir():
return os.path.dirname(__file__)
def get_urls(destdir, s):
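    # ``s`` is a progress spinner iterator; it is advanced once per URL read.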
import gzip
types = ['good', 'broken', 'incorrect']
seen = set()
urls = []
for type in types:
filename = get_pkg_data_filename(
'urls/cone.{0}.dat.gz'.format(type))
with gzip.open(filename, 'rb') as fd:
for url in fd.readlines():
next(s)
url = url.strip()
if url not in seen:
with result.Result(url, root=destdir) as r:
r['expected'] = type
urls.append(url)
seen.add(url)
return urls
def download(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.download_xml_content()
def validate_vo(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.validate_vo()
def votlint_validate(args):
path_to_stilts_jar, url, destdir = args
with result.Result(url, root=destdir) as r:
if r['network_error'] is None:
r.validate_with_votlint(path_to_stilts_jar)
def write_html_result(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
html.write_result(r)
def write_subindex(args):
subset, destdir, total = args
html.write_index_table(destdir, *subset, total=total)
def make_validation_report(
urls=None, destdir='astropy.io.votable.validator.results',
multiprocess=True, stilts=None):
"""
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of strings, optional
If provided, is a list of HTTP urls to download VOTable files
from. If not provided, a built-in set of ~22,000 urls
compiled by HEASARC will be used.
destdir : path, optional
        The directory to write the report to. By default, this is a
        directory called ``'astropy.io.votable.validator.results'`` in the
        current directory. If the directory does not exist, it will be
        created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path, optional
        To perform validation with ``votlint`` from the Java-based
`STILTS <http://www.star.bris.ac.uk/~mbt/stilts/>`_ VOTable
parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first.
"""
from ....utils.console import (color_print, ProgressBar, Spinner)
if stilts is not None:
if not os.path.exists(stilts):
raise ValueError(
'{0} does not exist.'.format(stilts))
destdir = os.path.abspath(destdir)
if urls is None:
with Spinner('Loading URLs', 'green') as s:
urls = get_urls(destdir, s)
else:
color_print('Marking URLs', 'green')
for url in ProgressBar.iterate(urls):
with result.Result(url, root=destdir) as r:
                r['expected'] = 'good'  # caller-supplied URLs are assumed good
args = [(url, destdir) for url in urls]
color_print('Downloading VO files', 'green')
ProgressBar.map(
download, args, multiprocess=multiprocess)
color_print('Validating VO files', 'green')
ProgressBar.map(
validate_vo, args, multiprocess=multiprocess)
if stilts is not None:
color_print('Validating with votlint', 'green')
votlint_args = [(stilts, x, destdir) for x in urls]
ProgressBar.map(
votlint_validate, votlint_args, multiprocess=multiprocess)
color_print('Generating HTML files', 'green')
ProgressBar.map(
write_html_result, args, multiprocess=multiprocess)
with Spinner('Grouping results', 'green') as s:
subsets = result.get_result_subsets(urls, destdir, s)
color_print('Generating index', 'green')
html.write_index(subsets, urls, destdir)
color_print('Generating subindices', 'green')
subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
ProgressBar.map(
write_subindex, subindex_args, multiprocess=multiprocess)
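# A minimal invocation sketch (assumption: network access is available and
# the built-in HEASARC URL list is acceptable); pass ``stilts`` only if a
# local ``stilts.jar`` should also be used:
#
#     from astropy.io.votable.validator import make_validation_report
#     make_validation_report(destdir='validation-results')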
|
6bdad2594491985985084b21ecc53f100557b6f8826708dd0b680451979530fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import contextlib
from math import ceil
import os
import re
# ASTROPY
from ....utils.xml.writer import XMLWriter, xml_escape
from .... import online_docs_root
# VO
from .. import exceptions
html_header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML Basic 1.0//EN"
"http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd">
"""
default_style = """
body {
font-family: sans-serif
}
a {
text-decoration: none
}
.highlight {
color: red;
font-weight: bold;
text-decoration: underline;
}
.green { background-color: #ddffdd }
.red { background-color: #ffdddd }
.yellow { background-color: #ffffdd }
tr:hover { background-color: #dddddd }
table {
border-width: 1px;
border-spacing: 0px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
background-color: white;
padding: 5px;
}
table th {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
table td {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
"""
@contextlib.contextmanager
def make_html_header(w):
w.write(html_header)
with w.tag('html', xmlns="http://www.w3.org/1999/xhtml", lang="en-US"):
with w.tag('head'):
w.element('title', 'VO Validation results')
w.element('style', default_style)
with w.tag('body'):
yield
def write_source_line(w, line, nchar=0):
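    # Echo one line of the XML source, highlighting the character at index
    # ``nchar`` where the problem was reported.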
part1 = xml_escape(line[:nchar].decode('utf-8'))
char = xml_escape(line[nchar:nchar+1].decode('utf-8'))
part2 = xml_escape(line[nchar+1:].decode('utf-8'))
w.write(' ')
w.write(part1)
w.write('<span class="highlight">{}</span>'.format(char))
w.write(part2)
w.write('\n\n')
def write_warning(w, line, xml_lines):
warning = exceptions.parse_vowarning(line)
if not warning['is_something']:
w.data(line)
else:
w.write('Line {:d}: '.format(warning['nline']))
if warning['warning']:
w.write('<a href="{}/{}">{}</a>: '.format(
online_docs_root, warning['doc_url'], warning['warning']))
msg = warning['message']
if not isinstance(warning['message'], str):
msg = msg.decode('utf-8')
w.write(xml_escape(msg))
w.write('\n')
if 1 <= warning['nline'] < len(xml_lines):
write_source_line(w, xml_lines[warning['nline'] - 1], warning['nchar'])
def write_votlint_warning(w, line, xml_lines):
match = re.search(r"(WARNING|ERROR|INFO) \(l.(?P<line>[0-9]+), c.(?P<column>[0-9]+)\): (?P<rest>.*)", line)
if match:
w.write('Line {:d}: {}\n'.format(
int(match.group('line')), xml_escape(match.group('rest'))))
write_source_line(
w, xml_lines[int(match.group('line')) - 1],
int(match.group('column')) - 1)
else:
w.data(line)
w.data('\n')
def write_result(result):
if 'network_error' in result and result['network_error'] is not None:
return
xml = result.get_xml_content()
xml_lines = xml.splitlines()
path = os.path.join(result.get_dirpath(), 'index.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
with w.tag('p'):
with w.tag('a', href='vo.xml'):
w.data(result.url.decode('ascii'))
w.element('hr')
with w.tag('pre'):
w._flush()
for line in result['warnings']:
write_warning(w, line, xml_lines)
if result['xmllint'] is False:
w.element('hr')
w.element('p', 'xmllint results:')
content = result['xmllint_content']
if not isinstance(content, str):
content = content.decode('ascii')
content = content.replace(result.get_dirpath() + '/', '')
with w.tag('pre'):
w.data(content)
if 'votlint' in result:
if result['votlint'] is False:
w.element('hr')
w.element('p', 'votlint results:')
content = result['votlint_content']
if not isinstance(content, str):
content = content.decode('ascii')
with w.tag('pre'):
w._flush()
for line in content.splitlines():
write_votlint_warning(w, line, xml_lines)
def write_result_row(w, result):
with w.tag('tr'):
with w.tag('td'):
if ('network_error' in result and
result['network_error'] is not None):
w.data(result.url.decode('ascii'))
else:
w.element('a', result.url.decode('ascii'),
href='{}/index.html'.format(result.get_htmlpath()))
if 'network_error' in result and result['network_error'] is not None:
w.element('td', str(result['network_error']),
attrib={'class': 'red'})
w.element('td', '-')
w.element('td', '-')
w.element('td', '-')
w.element('td', '-')
else:
w.element('td', '-', attrib={'class': 'green'})
if result['nexceptions']:
cls = 'red'
msg = 'Fatal'
elif result['nwarnings']:
cls = 'yellow'
msg = str(result['nwarnings'])
else:
cls = 'green'
msg = '-'
w.element('td', msg, attrib={'class': cls})
msg = result['version']
if result['xmllint'] is None:
cls = ''
elif result['xmllint'] is False:
cls = 'red'
else:
cls = 'green'
w.element('td', msg, attrib={'class': cls})
if result['expected'] == 'good':
cls = 'green'
msg = '-'
elif result['expected'] == 'broken':
cls = 'red'
msg = 'net'
elif result['expected'] == 'incorrect':
cls = 'yellow'
msg = 'invalid'
w.element('td', msg, attrib={'class': cls})
if 'votlint' in result:
if result['votlint']:
cls = 'green'
msg = 'Passed'
else:
cls = 'red'
msg = 'Failed'
else:
cls = ''
msg = '?'
w.element('td', msg, attrib={'class': cls})
def write_table(basename, name, results, root="results", chunk_size=500):
def write_page_links(j):
if npages <= 1:
return
with w.tag('center'):
if j > 0:
w.element('a', '<< ', href='{}_{:02d}.html'.format(basename, j-1))
for i in range(npages):
if i == j:
w.data(str(i+1))
else:
w.element(
'a', str(i+1),
href='{}_{:02d}.html'.format(basename, i))
w.data(' ')
if j < npages - 1:
w.element('a', '>>', href='{}_{:02d}.html'.format(basename, j+1))
npages = int(ceil(float(len(results)) / chunk_size))
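    # e.g. 1200 results with the default chunk_size of 500 yield 3 pages.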
for i, j in enumerate(range(0, max(len(results), 1), chunk_size)):
subresults = results[j:j+chunk_size]
path = os.path.join(root, '{}_{:02d}.html'.format(basename, i))
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
write_page_links(i)
w.element('h2', name)
with w.tag('table'):
with w.tag('tr'):
w.element('th', 'URL')
w.element('th', 'Network')
w.element('th', 'Warnings')
w.element('th', 'Schema')
w.element('th', 'Expected')
w.element('th', 'votlint')
for result in subresults:
write_result_row(w, result)
write_page_links(i)
def add_subset(w, basename, name, subresults, inside=['p'], total=None):
with w.tag('tr'):
subresults = list(subresults)
if total is None:
total = len(subresults)
if total == 0: # pragma: no cover
percentage = 0.0
else:
percentage = (float(len(subresults)) / total)
with w.tag('td'):
for element in inside:
w.start(element)
w.element('a', name, href='{}_00.html'.format(basename))
for element in reversed(inside):
w.end(element)
numbers = '{:d} ({:.2%})'.format(len(subresults), percentage)
with w.tag('td'):
w.data(numbers)
def write_index(subsets, results, root='results'):
path = os.path.join(root, 'index.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
w.element('h1', 'VO Validation results')
with w.tag('table'):
for subset in subsets:
add_subset(w, *subset, total=len(results))
def write_index_table(root, basename, name, subresults, inside=None,
total=None, chunk_size=500):
if total is None:
total = len(subresults)
percentage = (float(len(subresults)) / total)
numbers = '{:d} ({:.2%})'.format(len(subresults), percentage)
write_table(basename, name + ' ' + numbers, subresults, root, chunk_size)
|
f4d9671c6301fbc26800294054198b484dfda28fa542555e121739f6f1aced67 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains pytest plugins that are used by the astropy test suite.
"""
|
cde2f5498d48582146073c93060bdcc878d3ad77a0d154209dfa599c81e568d3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of configuration and cache directories used
by pytest.
"""
import datetime
import locale
import os
import sys
from collections import OrderedDict
import pytest
from ...config.paths import set_temp_config, set_temp_cache
from ...utils.argparse import writeable_directory
from ..helper import treat_deprecations_as_exceptions
import importlib.machinery as importlib_machinery
# these pytest hooks allow us to mark tests and run the marked tests with
# specific command line options.
def pytest_addoption(parser):
parser.addoption("--config-dir", nargs='?', type=writeable_directory,
help="specify directory for storing and retrieving the "
"Astropy configuration during tests (default is "
"to use a temporary directory created by the test "
"runner); be aware that using an Astropy config "
"file other than the default can cause some tests "
"to fail unexpectedly")
parser.addoption("--cache-dir", nargs='?', type=writeable_directory,
help="specify directory for storing and retrieving the "
"Astropy cache during tests (default is "
"to use a temporary directory created by the test "
"runner)")
parser.addini("config_dir",
"specify directory for storing and retrieving the "
"Astropy configuration during tests (default is "
"to use a temporary directory created by the test "
"runner); be aware that using an Astropy config "
"file other than the default can cause some tests "
"to fail unexpectedly", default=None)
parser.addini("cache_dir",
"specify directory for storing and retrieving the "
"Astropy cache during tests (default is "
"to use a temporary directory created by the test "
"runner)", default=None)
def pytest_configure(config):
treat_deprecations_as_exceptions()
def pytest_runtest_setup(item):
config_dir = item.config.getini('config_dir')
cache_dir = item.config.getini('cache_dir')
# Command-line options can override, however
config_dir = item.config.getoption('config_dir') or config_dir
cache_dir = item.config.getoption('cache_dir') or cache_dir
# We can't really use context managers directly in py.test (although
# py.test 2.7 adds the capability), so this may look a bit hacky
if config_dir:
item.set_temp_config = set_temp_config(config_dir)
item.set_temp_config.__enter__()
if cache_dir:
item.set_temp_cache = set_temp_cache(cache_dir)
item.set_temp_cache.__enter__()
def pytest_runtest_teardown(item, nextitem):
if hasattr(item, 'set_temp_cache'):
item.set_temp_cache.__exit__()
if hasattr(item, 'set_temp_config'):
item.set_temp_config.__exit__()
|
423890efaac9c12dcc44e8e2d0a2dcc63b59ead4135ecfbe83fabffea11de428 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from ..helper import ignore_warnings
from ...utils.introspection import resolve_name
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Scipy', 'scipy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py'),
('Pandas', 'pandas')])
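# Affiliated packages can extend or trim this mapping before tests run, e.g.
# (hypothetical entry):
#
#     PYTEST_HEADER_MODULES['Astroquery'] = 'astroquery'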
# This always resolves to Astropy's own version, even when an affiliated
# package is under test.
from ... import __version__
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
def pytest_report_header(config):
try:
stdoutencoding = sys.stdout.encoding or 'ascii'
except AttributeError:
stdoutencoding = 'ascii'
args = config.args
# TESTED_VERSIONS can contain the affiliated package version, too
if len(TESTED_VERSIONS) > 1:
for pkg, version in TESTED_VERSIONS.items():
if pkg != 'Astropy':
s = "\nRunning tests with {0} version {1}.\n".format(
pkg, version)
else:
s = "\nRunning tests with Astropy version {0}.\n".format(
TESTED_VERSIONS['Astropy'])
# Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
# each directory argument
if hasattr(config, 'rootdir'):
rootdir = str(config.rootdir)
if not rootdir.endswith(os.sep):
rootdir += os.sep
dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
for arg in args]
else:
dirs = args
s += "Running tests in {0}.\n\n".format(" ".join(dirs))
s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19])
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += "Platform: {0}\n\n".format(plat)
s += "Executable: {0}\n\n".format(sys.executable)
s += "Full Python Version: \n{0}\n\n".format(sys.version)
s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
s += '\n'
s += "byteorder: {0}\n".format(sys.byteorder)
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
for module_display, module_name in PYTEST_HEADER_MODULES.items():
try:
with ignore_warnings(DeprecationWarning):
module = resolve_name(module_name)
except ImportError:
s += "{0}: not available\n".format(module_display)
else:
try:
version = module.__version__
except AttributeError:
version = 'unknown (no __version__ attribute)'
s += "{0}: {1}\n".format(module_display, version)
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
op_value = getattr(config.option, op, None)
if op_value:
if isinstance(op_value, str):
op = ': '.join((op, op_value))
opts.append(op)
if opts:
s += "Using Astropy options: {0}.\n".format(", ".join(opts))
return s
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests are known to fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
|
89fae93a671909cf71e77e5bd052aa0de122475774326e510470ff05bfd774b1 | from ... import units as u
from ..helper import assert_quantity_allclose, pytest
def test_assert_quantity_allclose():
assert_quantity_allclose([1, 2], [1, 2])
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm)
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm)
with pytest.raises(AssertionError):
assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm)
with pytest.raises(AssertionError):
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm)
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200])
assert exc.value.args[0] == "Units for 'desired' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [100, 200] * u.cm)
assert exc.value.args[0] == "Units for 'desired' (cm) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3)
assert exc.value.args[0] == "Units for 'atol' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m)
assert exc.value.args[0] == "Units for 'atol' (m) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m)
assert exc.value.args[0] == "`rtol` should be dimensionless"
|
cc8f2b2c9c393c9ce6ec9146fbe3f9eafd45c6f31d67ccc70367380be034fee6 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import doctest
from textwrap import dedent
import pytest
# test helper.run_tests function
from ... import test as run_tests
from .. import helper
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
run_tests(package='fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
run_tests(pastebin='not_an_option')
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
def test_unicode_literal_conversion():
assert isinstance('ångström', str)
def test_doctest_float_replacement(tmpdir):
test1 = dedent("""
This will demonstrate a doctest that fails due to a few extra decimal
places::
>>> 1.0 / 3.0
0.333333333333333311
""")
test2 = dedent("""
This is the same test, but it should pass with use of
+FLOAT_CMP::
>>> 1.0 / 3.0 # doctest: +FLOAT_CMP
0.333333333333333311
""")
test1_rst = tmpdir.join('test1.rst')
test2_rst = tmpdir.join('test2.rst')
test1_rst.write(test1)
test2_rst.write(test2)
with pytest.raises(doctest.DocTestFailure):
doctest.testfile(str(test1_rst), module_relative=False,
raise_on_error=True, verbose=False, encoding='utf-8')
doctest.testfile(str(test2_rst), module_relative=False,
raise_on_error=True, verbose=False, encoding='utf-8')
|
6ccf501eb4148d93f55c05cbec13b721e86245ba0f75f74cbd82a8125d430021 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
import os
import types
def test_imports():
"""
    This just imports all modules in astropy, making sure they don't have
    any undeclared dependencies that sneak through.
"""
from ...utils import find_current_module
pkgornm = find_current_module(1).__name__.split('.')[0]
if isinstance(pkgornm, str):
package = pkgutil.get_loader(pkgornm).load_module(pkgornm)
elif (isinstance(pkgornm, types.ModuleType) and
'__init__' in pkgornm.__file__):
package = pkgornm
else:
msg = 'test_imports is not determining a valid package/package name'
raise TypeError(msg)
if hasattr(package, '__path__'):
pkgpath = package.__path__
elif hasattr(package, '__file__'):
pkgpath = os.path.split(package.__file__)[0]
else:
raise AttributeError('package to generate config items for does not '
'have __file__ or __path__')
prefix = package.__name__ + '.'
def onerror(name):
# A legitimate error occurred in a module that wasn't excluded
raise
for imper, nm, ispkg in pkgutil.walk_packages(pkgpath, prefix,
onerror=onerror):
imper.find_module(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert 'os' not in d
assert 'log' in d
assert 'test' in d
assert 'sys' not in d
|
54bd3dc46ae37bd4ccfa81cfd8261498292cd73cef99f761a2b24764d6c65d28 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from ... import constants as const
from ...tests.helper import pickle_protocol, check_pickling_recovery # noqa
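# Constant's positional arguments below are (abbrev, name, value, unit,
# uncertainty, reference); 'h_fake' is a made-up constant used only to
# exercise pickling.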
originals = [const.Constant('h_fake', 'Not Planck',
0.0, 'J s', 0.0, 'fakeref',
system='si'),
const.h,
const.e]
xfails = [True, True, True]
@pytest.mark.parametrize(("original", "xfail"), zip(originals, xfails))
def test_new_constant(pickle_protocol, original, xfail):
if xfail:
pytest.xfail()
check_pickling_recovery(original, pickle_protocol)
|
2baa144cf4c3490761077bd2427e729472587913faabc083c74fa6076c49f1f2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pytest
from .. import Constant
from ...units import Quantity as Q
def test_c():
from ..codata2010 import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from ..codata2010 import h
from .. import h as h_current
# check that the value is the CODATA2010 value
assert abs(h.value - 6.62606957e-34) < 1e-43
assert abs(h.si.value - 6.62606957e-34) < 1e-43
assert abs(h.cgs.value - 6.62606957e-27) < 1e-36
# Check it is different than the current value
assert abs(h.value - h_current.value) > 4e-42
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty
assert h.name
assert h.reference
assert h.unit
def test_e():
from ..astropyconst13 import e
# A test quantity
E = Q(100.00000348276221, 'V/m')
# e.cgs is too ambiguous and should not work at all
with pytest.raises(TypeError):
e.cgs * E
assert isinstance(e.si, Q)
assert isinstance(e.gauss, Q)
assert isinstance(e.esu, Q)
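    # The non-round field strength presumably compensates for the small
    # difference between the CODATA 2010 value of e used here and the e
    # baked into astropy's eV unit, making the product exactly 100 eV/m.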
assert e.si * E == Q(100, 'eV/m')
assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m')
assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m')
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from ..astropyconst13 import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == 'acceleration'
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from ..astropyconst13 import b_wien
from ... import units as u
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
def test_unit():
from ... import units as u
from .. import astropyconst13 as const
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
# invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from ... import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from .. import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, 'reference')
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, 'reference')
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
def test_context_manager():
from ... import constants as const
with const.set_enabled_constants('astropyconst13'):
assert const.h.value == 6.62606957e-34 # CODATA2010
assert const.h.value == 6.626070040e-34 # CODATA2014
with pytest.raises(ValueError):
with const.set_enabled_constants('notreal'):
const.h
|
086baa3ed5ce3f1f1598b4d8adc9ae43d5b62bc25973fe299afe2e3f5238af22 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pytest
from .. import Constant
from ...units import Quantity as Q
def test_c():
from .. import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from .. import h
# check that the value is fairly close to what it should be (not exactly
# checking because this might get updated in the future)
assert abs(h.value - 6.626e-34) < 1e-38
assert abs(h.si.value - 6.626e-34) < 1e-38
assert abs(h.cgs.value - 6.626e-27) < 1e-31
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty
assert h.name
assert h.reference
assert h.unit
def test_e():
"""Tests for #572 demonstrating how EM constants should behave."""
from .. import e
# A test quantity
E = Q(100, 'V/m')
# Without specifying a system e should not combine with other quantities
pytest.raises(TypeError, lambda: e * E)
# Try it again (as regression test on a minor issue mentioned in #745 where
# repeated attempts to use e in an expression resulted in UnboundLocalError
# instead of TypeError)
pytest.raises(TypeError, lambda: e * E)
# e.cgs is too ambiguous and should not work at all
pytest.raises(TypeError, lambda: e.cgs * E)
assert isinstance(e.si, Q)
assert isinstance(e.gauss, Q)
assert isinstance(e.esu, Q)
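    # 1 eV = e * 1 V by definition, so e.si (in coulomb) times a field in
    # V/m gives J/m, which here is exactly 100 eV/m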
assert e.si * E == Q(100, 'eV/m')
assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m')
assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m')
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from .. import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == 'acceleration'
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from .. import b_wien
from ... import units as u
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
def test_unit():
from ... import units as u
from ... import constants as const
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
# invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from ... import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from .. import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, 'reference')
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, 'reference')
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
|
a39e1a49a1ce802bde50cf9da056b12f6eebbd63f307bfffd40e18a378eb5a37 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from ...tests.helper import pickle_protocol, check_pickling_recovery
from ... import cosmology as cosm
originals = [cosm.FLRW]
xfails = [False]
@pytest.mark.parametrize(("original", "xfail"),
zip(originals, xfails))
def test_flrw(pickle_protocol, original, xfail):
if xfail:
pytest.xfail()
check_pickling_recovery(original, pickle_protocol)
|
13cd7cf8eb83aac53a280ed4facc4094724468b99bf7a9836ad34d8c923a553c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import pytest
import numpy as np
from .. import core, funcs
from ...tests.helper import quantity_allclose as allclose
from ...utils.compat import NUMPY_LT_1_14
from ... import units as u
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_init():
""" Tests to make sure the code refuses inputs it is supposed to"""
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
Tcmb0=u.Quantity([0.0, 2], u.K))
with pytest.raises(ValueError):
h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc)
cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=0.5)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Ob(1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Odm(1)
with pytest.raises(TypeError):
core.default_cosmology.validate(4)
def test_basic():
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04,
Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
# This next test will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
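    # Tnu0 = (4/11)**(1/3) * Tcmb0 for massless neutrinos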
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
# Make sure setting them as quantities gives the same results
H0 = u.Quantity(70, u.km / (u.s * u.Mpc))
T = u.Quantity(2.0, u.K)
cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
""" Test if the right units are being returned"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H0.unit == u.km / u.Mpc / u.s
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb0.unit == u.K
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu0.unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
""" Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = ['comoving_distance', 'luminosity_distance',
'comoving_transverse_distance', 'angular_diameter_distance',
'distmod', 'lookback_time', 'age', 'comoving_volume',
'differential_comoving_volume', 'kpc_comoving_per_arcmin']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
'w', 'de_density_scale', 'Onu', 'Ogamma',
'nu_relative_density']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
wp=-1.2, wa=-0.2, zp=0.9),
core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
for method in methods:
            g = getattr(tcosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif('not HAS_SCIPY')
def test_clone():
""" Test clone operation"""
cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
z = np.linspace(0.1, 3, 15)
# First, test with no changes, which should return same object
newclone = cosmo.clone()
assert newclone is cosmo
# Now change H0
# Note that H0 affects Ode0 because it changes Ogamma0
newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)
assert newclone is not cosmo
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# Compare modified version with directly instantiated one
cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Now try changing multiple things
newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc,
Tcmb0=2.8 * u.K)
assert newclone.__class__ == cosmo.__class__
assert not newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
assert allclose(newclone.Tcmb0, 2.8 * u.K)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# And direct comparison
cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc,
Om0=0.27, Tcmb0=2.8 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Try a dark energy class, make sure it can handle w params
cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc,
Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K)
newclone = cosmo.clone(w0=-1.1, wa=0.2)
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert allclose(newclone.H0, cosmo.H0)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ode0, cosmo.Ode0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.w0, cosmo.w0)
assert allclose(newclone.w0, -1.1)
assert not allclose(newclone.wa, cosmo.wa)
assert allclose(newclone.wa, 0.2)
# Now test exception if user passes non-parameter
with pytest.raises(AttributeError):
newclone = cosmo.clone(not_an_arg=4)
def test_xtfuncs():
""" Test of absorption and lookback integrand"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
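    # lookback_time_integrand(z) = 1 / ((1 + z) * E(z)) and
    # abs_distance_integrand(z) = (1 + z)**2 / E(z), both dimensionless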
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
rtol=1e-4)
assert allclose(cosmo.lookback_time_integrand(z),
[0.10333179, 0.04644541], rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z),
[2.7899584, 3.44104758], rtol=1e-4)
def test_repr():
""" Test string representation of built in classes"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, '
'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, '
'Ob0=None)').format(' 0. 0. 0.' if NUMPY_LT_1_14 else
'0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV))
expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, '
'Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, '
'Ob0=None)').format(' 0.01 0.01 0.01' if NUMPY_LT_1_14 else
'0.01 0.01 0.01')
assert str(cosmo) == expected
cosmo = core.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05)
expected = ('FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, '
'Tcmb0=3 K, Neff=3.04, m_nu=[{}] eV, Ob0=0.05)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1')
expected = ('wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, '
'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2')
expected = ('FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, '
'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)')
assert str(cosmo) == expected
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3')
expected = ('w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, '
'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4',
Ob0=0.0456789)
expected = ('Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, '
'w0=-0.9, Tcmb0=0 K, Neff=3.04, m_nu=None, '
'Ob0=0.0457)')
assert str(cosmo) == expected
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2,
zp=0.3, name='test5')
expected = ('wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, '
'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=0 K, '
'Neff=3.04, m_nu=None, Ob0=None)')
assert str(cosmo) == expected
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725,
m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV))
expected = ('w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, '
'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0.001 0.01 0.015' if NUMPY_LT_1_14 else
'0.001 0.01 0.015')
assert str(cosmo) == expected
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_z1():
""" Test a flat cosmology at z=1 against several other on-line
calculators.
"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
z = 1
# Test values were taken from the following web cosmology
# calculators on 27th Feb 2012:
# Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
# (http://adsabs.harvard.edu/abs/2006PASP..118.1711W)
# Kempner: http://www.kempner.net/cosmic.php
# iCosmos: http://www.icosmos.co.uk/index.html
# The order of values below is Wright, Kempner, iCosmos'
assert allclose(cosmo.comoving_distance(z),
[3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.angular_diameter_distance(z),
[1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.luminosity_distance(z),
[6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.lookback_time(z),
[7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(cosmo.lookback_distance(z),
[2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
def test_zeroing():
""" Tests if setting params to 0s always respects that"""
# Make sure Ode = 0 behaves that way
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)
assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Ode(1), 0)
# Ogamma0 and Onu
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
assert allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# Obaryon
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)
assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=0.0,
name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=3.0,
m_nu=0.1 * u.eV, name="test_cos_nu")
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wright's advanced cosmo calculator, Aug 17 2012
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]),
[1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]),
[0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(cosmo.de_density_scale([0.5, 1.0]),
[1.12934694, 1.23114444], rtol=1e-4)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif('not HAS_SCIPY')
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a mathematica
computation"""
# w0wa models
z = np.array([0.2, 0.4, 0.9, 1.2])
cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(cosmo.w0, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,
Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.9)
assert allclose(cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
# Test non-relativistic matter evolution
tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
rtol=1e-4)
assert allclose(tcos.Ob(z),
[0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = core.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
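    # Here Ogamma0 h^2 = (4 sigma_sb / c^3) * Tcmb0^4 / rho_crit(h=1), with
    # sigma_sb = 5.670373e-8 W m^-2 K^-4, c in m/s, and
    # rho_crit(h=1) ~ 1.87837e-26 kg m^-3; the neutrinos then contribute
    # 7/8 * (4/11)**(4/3) * Neff of the photon density.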
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
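    # Tcmb(z) = Tcmb0 * (1 + z)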
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
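    # Tnu0 = (4/11)**(1/3) * Tcmb0 ~ 0.7138 * 3.0 K for massless neutrinos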
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
def test_efunc_vs_invefunc():
""" Test that efunc and inv_efunc give inverse values"""
# Note that all of the subclasses here don't need
# scipy because they don't need to call de_density_scale
# The test following this tests the case where that is needed.
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# Below are the 'standard' included cosmologies
# We do the non-standard case in test_efunc_vs_invefunc_flrw,
# since it requires scipy
cosmo = core.LambdaCDM(70, 0.3, 0.5)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV))
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatLambdaCDM(50.0, 0.27)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_efunc_vs_invefunc_flrw():
""" Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
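    # the proper and comoving scales differ by a factor (1 + z), so at
    # z=3 the proper value is 4 times the comoving one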
assert allclose(cosmo.arcsec_per_kpc_comoving(3),
0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3),
0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3),
1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3),
472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_volume():
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
    # test against Ned Wright's calculator (cubic Gpc)
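    # for the flat case this is simply 4*pi/3 * comoving_distance(z)**3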
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
# The wright calculator isn't very accurate, so we use a rather
# modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts),
wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts),
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
# The wright calculator isn't very accurate, so we use a rather
# modest precision.
ftemp = lambda x: c_flat.differential_comoving_volume(x).value
otemp = lambda x: c_open.differential_comoving_volume(x).value
ctemp = lambda x: c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_flat, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_open, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_open_closed_icosmo():
""" Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
# Test integer vs. floating point inputs
cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
def test_wz():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
assert allclose(cosmo.w(1.0), -1.)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-1., -1, -1, -1, -1, -1])
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
assert allclose(cosmo.w0, -0.5)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
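    # w0wzCDM uses the linear parametrization w(z) = w0 + wz * z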
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1.0, -0.75, -0.5, -0.25, 0.15])
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wz, 0.5)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
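    # w0waCDM uses the CPL parametrization w(z) = w0 + wa * z / (1 + z)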
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wa, -0.5)
assert allclose(cosmo.w(1.0), -1.25)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485])
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.wp, -0.9)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
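    # wpwaCDM pivots at zp: w(z) = wp + wa * (a_p - a) with a = 1/(1+z)
    # and a_p = 1/(1+zp), so w(zp) = wp exactly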
assert allclose(cosmo.w(0.5), -0.9)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667,
-0.82380952, -0.78266667])
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_densityscale():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
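    # for constant w, de_density_scale(z) = (1 + z)**(3 * (1 + w0))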
assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
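    # for w0waCDM, de_density_scale(z) =
    #   (1 + z)**(3 * (1 + w0 + wa)) * exp(-3 * wa * z / (1 + z))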
assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1., 5.]),
[5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
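    # hubble_distance = c / H0, and distmod = 5 * log10(d_L / 10 pc)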
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]),
[44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1., 5.]),
[44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(tcos.luminosity_distance([50, 100]),
[16612.44047622, -46890.79092244] * u.Mpc)
assert allclose(tcos.distmod([50, 100]),
[46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
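    # critical_density0 = 3 * H0**2 / (8 * pi * G)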
assert allclose(tcos.critical_density0,
9.309668456020899e-30 * u.g / u.cm**3)
assert allclose(tcos.critical_density0,
tcos.critical_density(0))
assert allclose(tcos.critical_density([1, 5]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
assert allclose(tcos.critical_density([1., 5.]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert allclose(tcos._comoving_distance_z1z2(1, 2),
-tcos._comoving_distance_z1z2(2, 1))
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = core.FlatLambdaCDM(100, 0, Tcmb0=0)
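    # a pure de Sitter universe has no Big Bang, so its age diverges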
assert allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = core.FlatLambdaCDM(100, 1, Tcmb0=0)
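    # Einstein-de Sitter: age(z) = (2/3) / (H0 * (1 + z)**1.5)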
assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = core.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = core.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_transverse_distance_z1z2():
tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
1313.2232194828466 * u.Mpc)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = core.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# Test positive curvature with scalar, array combination.
tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (-281.31602666724865,
0.,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_angular_diameter_distance_z1z2():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
646.22968662822018 * u.Mpc)
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
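    # the third value is negative because that pair has z1=2 > z2=1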
results = (1760.0628637762106,
1670.7497657219858,
-969.34452994,
1159.0970895962193,
115.72768186186921) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.,
332.09893173,
986.35635069,
1508.37010062,
1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
results)
# Non-flat (positive Ok0) test
tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
620.1175337852428 * u.Mpc)
# Non-flat (negative Ok0) test
tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
228.42914659246014 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
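# The absorption distance is dimensionless (e.g. Hogg 1999, eq. 28):
# X(z) = integral from 0 to z of (1 + z')**2 / E(z') dz'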
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_basic():
# Test no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05,
Tcmb0=2.725 * u.K, m_nu=u.Quantity(0, u.eV))
assert allclose(tcos.Neff, 4.05)
assert not tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 4
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)
assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
rtol=1e-6)
assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
rtol=1e-6)
# Alternative no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K,
m_nu=u.Quantity(0.4, u.eV))
assert not tcos.has_massive_nu
assert tcos.m_nu is None
# Test basic setting, retrieval of values
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K,
m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV))
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)
# All massive neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725,
m_nu=u.Quantity(0.1, u.eV), Neff=3.1)
assert allclose(tcos.Neff, 3.1)
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971,
6440.80611897] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479,
6671.85418235] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626,
4638.20476018] * u.Mpc, rtol=1e-4)
# Flat
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173,
7083.5374303] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102,
7080.71698117] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188,
4409.09346922] * u.Mpc, rtol=1e-4)
# Add w
cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437,
7149.68648536] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601,
7009.80166062] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289,
4409.40817174] * u.Mpc, rtol=1e-4)
# Non-flat w
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778,
6179.37072324] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353,
6275.9206788] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426,
4671.83818117] * u.Mpc, rtol=1e-4)
# w0wa
cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924,
6339.8549956] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281,
6342.3228444] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919,
4736.35404638] * u.Mpc, rtol=1e-4)
# Flatw0wa
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818,
6948.26480378] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576,
6945.61856513] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093,
4409.35399673] * u.Mpc, rtol=1e-4)
# wpwa
cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201,
6373.36147627] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391,
6366.10224229] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397,
5116.51184842] * u.Mpc, rtol=1e-4)
# w0wz
cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257,
6562.70873734] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557,
6524.17408738] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278,
5191.54178243] * u.Mpc, rtol=1e-4)
# Also test different numbers of massive neutrinos
# for FlatLambdaCDM to give the scalar nu density functions a
# workout
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719,
5636.10397302] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974,
5213.07557084] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243,
5006.50158829] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847,
4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
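# nuprefac is the energy density of a single massless neutrino species
# relative to the photon density after e+/e- annihilation. For massive
# neutrinos astropy applies the Komatsu et al. (2011) fitting form,
# approximately f(y) = (1 + (0.3173 * y)**1.83)**(1 / 1.83) with
# y = m_nu * c**2 / (k_B * T_nu), which is the ~0.5% approximation
# referred to above.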
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.01, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = nuprefac * tcos.Neff * \
np.array([149.386233, 74.87915, 50.0518,
14.002403, 1.03702333])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
# whereas there we are checking internal consistency on the same
# architecture, and so can be more demanding
z_at_value = funcs.z_at_value
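# z_at_value numerically inverts func(z) = fval by bounded optimization
# over [zmin, zmax]; that bracketing is why solutions lying outside the
# limits raise a CosmologyError (exercised at the end of this test).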
cosmo = core.Planck13
d = cosmo.luminosity_distance(3)
assert allclose(z_at_value(cosmo.luminosity_distance, d), 3,
rtol=1e-8)
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
1.3685790653802761, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
0.7951983674601507, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmax=2), 0.68127769625288614, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmin=2.5), 3.7914908028272083, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
1.9913891680278133, rtol=1e-6)
# test behaviour when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_roundtrip():
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck13 cosmology
# they are redshift independent and hence uninvertible;
# the *_distance_z1z2 methods take multiple arguments, so they
# require special handling; and clone isn't a redshift-dependent
# method
skip = ('Ok',
'angular_diameter_distance_z1z2',
'clone',
'de_density_scale', 'w')
import inspect
methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith('_') or name in skip:
continue
print('Round-trip testing {0}'.format(name))
fval = func(z)
# we need zmax here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
# Test distance functions between two redshifts
z2 = 2.0
func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2),
lambda z1:
core.Planck13._comoving_transverse_distance_z1z2(z1, z2),
lambda z1:
core.Planck13.angular_diameter_distance_z1z2(z1, z2)]
for func in func_z1z2:
fval = func(z)
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
|
ea613b38b76cd957ab95f3cf0577c910ebc954ce650cced8c77b68b81de175d5 | # Load the WCS information from a fits header, and use it
# to convert pixel coordinates to world coordinates.
import numpy as np
from astropy import wcs
from astropy.io import fits
import sys
def load_wcs_from_file(filename):
# Load the FITS hdulist using astropy.io.fits
hdulist = fits.open(filename)
# Parse the WCS keywords in the primary HDU
w = wcs.WCS(hdulist[0].header)
# Print out the "name" of the WCS, as defined in the FITS header
print(w.wcs.name)
# Print out all of the settings that were parsed from the header
w.wcs.print_contents()
# Three pixel coordinates of interest.
# Note we've silently assumed a NAXIS=2 image here
pixcrd = np.array([[0, 0], [24, 38], [45, 98]], np.float_)
# Convert pixel coordinates to world coordinates
# The second argument is "origin" -- in this case we're declaring we
# have 1-based (Fortran-like) coordinates.
world = w.wcs_pix2world(pixcrd, 1)
print(world)
# Convert the same coordinates back to pixel coordinates.
pixcrd2 = w.wcs_world2pix(world, 1)
print(pixcrd2)
# These should be the same as the original pixel coordinates, modulo
# some floating-point error.
assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6
if __name__ == '__main__':
load_wcs_from_file(sys.argv[-1])
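# Example invocation (hypothetical file names):
# python load_wcs_from_file.py image.fits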
|
5caa8a62e3f3333c666adac05d0abd668d7da339185cc6bfff37abbad0fac9e0 | # Set the WCS information manually by setting properties of the WCS
# object.
import numpy as np
from astropy import wcs
from astropy.io import fits
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
# Set up an "Airy's zenithal" projection
# Vector properties may be set with Python lists, or Numpy arrays
w.wcs.crpix = [-234.75, 8.3393]
w.wcs.cdelt = np.array([-0.066667, 0.066667])
w.wcs.crval = [0, -90]
w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
w.wcs.set_pv([(2, 1, 45.0)])
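# set_pv takes (world axis, parameter number, value) triples, so the
# line above writes a PV2_1 = 45.0 card -- presumably the projection
# parameter of the zenithal AIR projection named in ctype.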
# Some pixel coordinates of interest.
pixcrd = np.array([[0, 0], [24, 38], [45, 98]], np.float_)
# Convert pixel coordinates to world coordinates
world = w.wcs_pix2world(pixcrd, 1)
print(world)
# Convert the same coordinates back to pixel coordinates.
pixcrd2 = w.wcs_world2pix(world, 1)
print(pixcrd2)
# These should be the same as the original pixel coordinates, modulo
# some floating-point error.
assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6
# Now, write out the WCS object as a FITS header
header = w.to_header()
# header is an astropy.io.fits.Header object. We can use it to create a new
# PrimaryHDU and write it to a file.
hdu = fits.PrimaryHDU(header=header)
# Save to FITS file
# hdu.writeto('test.fits')
|
3b967ebccb06a8e046c0c8fbe2ae4db9386d7fc40d5076154805d1616ed13651 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
# This is the same check as astropy/__init__.py but this one has to
# happen before importing ah_bootstrap
__minimum_python_version__ = '3.5'
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
sys.stderr.write("ERROR: Astropy requires Python {} or later\n".format(
__minimum_python_version__))
sys.exit(1)
import os
import glob
import ah_bootstrap
from setuptools import setup
from astropy_helpers.setup_helpers import (
register_commands, get_package_info, get_debug_option)
from astropy_helpers.distutils_helpers import is_distutils_display_option
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
import astropy
NAME = 'astropy'
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '3.1.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(NAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(NAME, VERSION, RELEASE, get_debug_option(NAME),
uses_git=not RELEASE)
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault('astropy', []).append('data/*')
# Add any necessary entry points
entry_points = {}
# Command-line scripts
entry_points['console_scripts'] = [
'fits2bitmap = astropy.visualization.scripts.fits2bitmap:main',
'fitscheck = astropy.io.fits.scripts.fitscheck:main',
'fitsdiff = astropy.io.fits.scripts.fitsdiff:main',
'fitsheader = astropy.io.fits.scripts.fitsheader:main',
'fitsinfo = astropy.io.fits.scripts.fitsinfo:main',
'samp_hub = astropy.samp.hub_script:hub_script',
'showtable = astropy.table.scripts.showtable:main',
'volint = astropy.io.votable.volint:main',
'wcslint = astropy.wcs.wcslint:main',
]
# Register ASDF extensions
entry_points['asdf_extensions'] = [
'astropy = astropy.io.misc.asdf.extension:AstropyExtension',
'astropy-asdf = astropy.io.misc.asdf.extension:AstropyAsdfExtension',
]
min_numpy_version = 'numpy>=' + astropy.__minimum_numpy_version__
setup_requires = [min_numpy_version]
# Make sure to have the packages needed for building astropy, but do not require them
# when installing from an sdist as the c files are included there.
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'PKG-INFO')):
setup_requires.extend(['cython>=0.21', 'jinja2>=2.7'])
install_requires = [min_numpy_version]
extras_require = {
'test': ['pytest-astropy']
}
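# The 'test' extra means that, e.g., `pip install astropy[test]` also
# pulls in the pytest-astropy test dependencies.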
# Avoid installing setup_requires dependencies if the user just
# queries for information
if is_distutils_display_option():
setup_requires = []
setup(name=NAME,
version=VERSION,
description='Community-developed python astronomy tools',
requires=['numpy'], # scipy not required, but strongly recommended
setup_requires=setup_requires,
install_requires=install_requires,
extras_require=extras_require,
provides=[NAME],
author='The Astropy Developers',
author_email='[email protected]',
license='BSD',
url='http://astropy.org',
long_description=astropy.__doc__,
keywords=['astronomy', 'astrophysics', 'cosmology', 'space', 'science',
'units', 'table', 'wcs', 'samp', 'coordinate', 'fits',
'modeling', 'models', 'fitting', 'ascii'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics'
],
cmdclass=cmdclassd,
zip_safe=False,
entry_points=entry_points,
python_requires='>=' + __minimum_python_version__,
tests_require=['pytest-astropy'],
**package_info
)
|
8cb9941f0eeb9b24157afb62bff779eb74915ac55918110c19574ee200fe1335 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and the options therein, determine the next step taken: if it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call this module's main entry point,
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py, but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if not isinstance(path, str):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch-22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
# Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
Returns the Distribution object for astropy_helpers if the import
succeeded, or `None` otherwise.
"""
path = os.path.abspath(self.path)
# Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will raise a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
# We use subprocess instead of run_setup from setuptools to
# avoid segmentation faults - see the following for more details:
# https://github.com/cython/cython/issues/2104
sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
if UPPER_VERSION_EXCLUSIVE is None:
req = DIST_NAME
else:
req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
attrs = {'setup_requires': [req]}
# NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
# it honours the options set in the [easy_install] section, and we need
# to explicitly fetch the requirement eggs as setup_requires does not
# get honored in recent versions of setuptools:
# https://github.com/pypa/setuptools/issues/1273
try:
context = _verbose if DEBUG else _silence
with context():
dist = _Distribution(attrs=attrs)
try:
dist.parse_config_files(ignore_option_errors=True)
dist.fetch_build_eggs(req)
except TypeError:
# On older versions of setuptools, ignore_option_errors
# doesn't exist, and the above two lines are not needed
# so we can just continue
pass
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
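# For example, for an installed 1.2.3 this builds the requirement
# 'astropy-helpers>1.2.3,<1.3.0' (see _next_version below).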
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning emitted by perl (from running git submodule)
# that only occurs with a malformed locale setting, which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
r'^(?P<status>[-+U ])(?P<commit>[0-9a-f]{40}) '
r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
# Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, str):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, str):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _verbose():
yield
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
|
692fc16e7d309ca57d6e2d47aa234c0f9047b379386a7e6970345a4e518c40a0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import sys
import os
from warnings import warn
__minimum_numpy_version__ = '1.10.0'
class UnsupportedPythonError(Exception):
pass
# This is the same check as the one at the top of setup.py
__minimum_python_version__ = '3.5'
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError("Astropy does not support Python < {}".format(__minimum_python_version__))
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then the import is within a
# source dir. .astropy-root is a file distributed with the source, but
# it should not be installed
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
def _is_astropy_setup():
"""
Returns whether we are currently being imported in the context of running
Astropy's setup.py.
"""
main_mod = sys.modules.get('__main__')
if not main_mod:
return False
return (getattr(main_mod, '__file__', False) and
os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and
_is_astropy_source(main_mod.__file__))
# this indicates whether or not we are in astropy's setup.py
try:
_ASTROPY_SETUP_
except NameError:
from sys import version_info
import builtins
# This will set the _ASTROPY_SETUP_ to True by default if
# we are running Astropy's setup.py
builtins._ASTROPY_SETUP_ = _is_astropy_setup()
try:
from .version import version as __version__
except ImportError:
# TODO: Issue a warning using the logging framework
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
# TODO: Issue a warning using the logging framework
__githash__ = ''
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'http://docs.astropy.org/en/latest/'
else:
online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__)
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
try:
import numpy
except ImportError:
pass
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy".format(__minimum_numpy_version__))
raise ImportError(msg)
return numpy
if not _ASTROPY_SETUP_:
_check_numpy()
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
def _rollback_import(message):
log.error(message)
# Now disable exception logging to avoid an annoying error in the
# exception logger before we raise the import error:
_teardown_log()
# Roll back any astropy sub-modules that have been imported thus
# far
for key in list(sys.modules):
if key.startswith('astropy.'):
del sys.modules[key]
raise ImportError('astropy')
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
log.warning('You appear to be trying to import astropy from '
'within a source checkout without building the '
'extension modules first. Attempting to (re)build '
'extension modules:')
try:
_rebuild_extensions()
except BaseException as exc:
_rollback_import(
'An error occurred while attempting to rebuild the '
'extension modules. Please try manually running '
'`./setup.py develop` or `./setup.py build_ext '
'--inplace` to see what the issue was. Extension '
'modules must be successfully compiled and importable '
'in order to import astropy.')
# Re-raise only if it wasn't an Exception instance,
# for example if a "SystemExit" or "KeyboardInterrupt" was
# raised.
if not isinstance(exc, Exception):
raise
else:
# Outright broken installation; don't be nice.
raise
# add these here so we only need to clean up the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
def _rebuild_extensions():
global __version__
global __githash__
import subprocess
import time
from .utils.console import Spinner
devnull = open(os.devnull, 'w')
old_cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
try:
sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
'--inplace'], stdout=devnull,
stderr=devnull)
with Spinner('Rebuilding extension modules') as spinner:
while sp.poll() is None:
next(spinner)
time.sleep(0.05)
finally:
os.chdir(old_cwd)
devnull.close()
if sp.returncode != 0:
raise OSError('Running setup.py build_ext --inplace failed '
'with error code {0}: try rerunning this command '
'manually to check what the error was.'.format(
sp.returncode))
# Try re-loading module-level globals from the astropy.version module,
# which may not have existed before this function ran
try:
from .version import version as __version__
except ImportError:
pass
try:
from .version import githash as __githash__
except ImportError:
pass
# Set the bibtex entry to the article referenced in CITATION
def _get_bibtex():
import re
if os.path.exists('CITATION'):
with open('CITATION', 'r') as citation:
refs = re.findall(r'\{[^()]*\}', citation.read())
            if len(refs) == 0:
                return ''
bibtexreference = "@ARTICLE{0}".format(refs[0])
return bibtexreference
else:
return ''
__bibtex__ = _get_bibtex()
import logging
# Use the root logger as a dummy log before initializing Astropy's logger
log = logging.getLogger()
if not _ASTROPY_SETUP_:
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
version, urlencode({'q': query}))
webbrowser.open(url)
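# Illustrative usage sketch: search the hosted docs for the convolution
# routines (opens a browser window and needs network access)::
#
#     >>> online_help('convolve')  # doctest: +SKIP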
__dir__ = ['__version__', '__githash__', '__minimum_numpy_version__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf']
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
be25ddfa509d2650c90158e1034f7d0236e6fc8dff0a5298a1e34cb75bde4936 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
from importlib.util import find_spec
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES
from astropy.tests.helper import enable_deprecations_as_exceptions
if find_spec('asdf') is not None:
    from distutils.version import LooseVersion
    from asdf import __version__ as asdf_version
    # Compare parsed versions rather than raw strings so that, e.g.,
    # '10.0.0' is correctly treated as newer than '2.0.0'.
    if LooseVersion(asdf_version) >= LooseVersion('2.0.0'):
pytest_plugins = ['asdf.tests.schema_tester']
PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
enable_deprecations_as_exceptions(
include_astropy_deprecations=False,
# This is a workaround for the OpenSSL deprecation warning that comes from
# the `requests` module. It only appears when both asdf and sphinx are
# installed. This can be removed once pyopenssl 1.7.20+ is released.
modules_to_ignore_on_import=['requests'])
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('Agg')
PYTEST_HEADER_MODULES['Cython'] = 'cython'
|
553699d484faa9d99797883d6a8f109d4a5ad69e5a484eaab565d62f0d058dc7 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_docs" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
from datetime import datetime
import os
import sys
import astropy
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/latest/', None)
intersphinx_mapping['ipython'] = ('http://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/stable/', None)
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _Astropy: http://astropy.org
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011–{0}, '.format(datetime.utcnow().year) + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ----------------------------------------
extensions += ['sphinx_astropy.ext.edit_on_github']
# Don't import the module as "version" or it will override the
# "version" configuration parameter
from astropy import version as versionmod
edit_on_github_project = "astropy/astropy"
if versionmod.release:
edit_on_github_branch = "v{0}.{1}.x".format(
versionmod.major, versionmod.minor)
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_github_skip_regex = '_.*|api/.*'
github_issues_url = 'https://github.com/astropy/astropy/issues/'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
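# Illustrative sketch of the expected nitpick-exceptions format (these sample
# entries are hypothetical, not the file's actual contents): each non-comment
# line holds a reference type and a target separated by whitespace, e.g.
#
#   py:class numpy.ma.MaskedArray
#   py:obj astropy.table.Table.read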
# -- Options for the Sphinx gallery -------------------------------------------
try:
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'http://matplotlib.org/',
'numpy': 'http://docs.scipy.org/doc/numpy/',
},
'abort_on_example_error': True
}
except ImportError:
def setup(app):
app.warn('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
linkcheck_anchors = False
|
226f32952ebef13b9fa1f9a87966cb63a48116fdee16e5a6d0f7f9f2bc0a0e9a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import warnings
from ..utils.exceptions import AstropyUserWarning
from ..utils import isiterable
__all__ = ['SigmaClip', 'sigma_clip', 'sigma_clipped_stats']
class SigmaClip:
"""
Class to perform sigma clipping.
The data will be iterated over, each time rejecting points that are
discrepant by more than a specified number of standard deviations
from a center value. If the data contains invalid values (NaNs or
infs), they are automatically masked before performing the sigma
clipping.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class.
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. Defaults to 3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
iters : int or `None`, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the
last iteration clips nothing). Defaults to 5.
cenfunc : callable, optional
The function used to compute the center for the clipping. Must
be a callable that takes in a masked array and outputs the
central value. Defaults to the median (`numpy.ma.median`).
stdfunc : callable, optional
The function used to compute the standard deviation about the
center. Must be a callable that takes in a masked array and
outputs a width estimator. Masked (rejected) pixels are those
where::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
where::
deviation = data - cenfunc(data [,axis=int])
Defaults to the standard deviation (`numpy.std`).
See Also
--------
sigma_clip
Examples
--------
This example generates random variates from a Gaussian distribution
and returns a masked array in which all points that are more than 2
sample standard deviations from the median are masked::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, iters=5)
>>> filtered_data = sigclip(randvar)
This example sigma clips on a similar distribution, but uses 3 sigma
relative to the sample *mean*, clips until convergence, and does not
copy the data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, iters=None, cenfunc=mean)
>>> filtered_data = sigclip(randvar, copy=False)
This example sigma clips along one axis on a similar distribution
(with bad points inserted)::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be masked, as the
variance is higher.
"""
def __init__(self, sigma=3., sigma_lower=None, sigma_upper=None, iters=5,
cenfunc=np.ma.median, stdfunc=np.std):
self.sigma = sigma
self.sigma_lower = sigma_lower
self.sigma_upper = sigma_upper
self.iters = iters
self.cenfunc = cenfunc
self.stdfunc = stdfunc
def __repr__(self):
return ('SigmaClip(sigma={0}, sigma_lower={1}, sigma_upper={2}, '
'iters={3}, cenfunc={4}, stdfunc={5})'
.format(self.sigma, self.sigma_lower, self.sigma_upper,
self.iters, self.cenfunc, self.stdfunc))
def __str__(self):
lines = ['<' + self.__class__.__name__ + '>']
attrs = ['sigma', 'sigma_lower', 'sigma_upper', 'iters', 'cenfunc',
'stdfunc']
for attr in attrs:
lines.append(' {0}: {1}'.format(attr, getattr(self, attr)))
return '\n'.join(lines)
def _perform_clip(self, _filtered_data, axis=None):
"""
Perform sigma clip by comparing the data to the minimum and
maximum values (median + sig * standard deviation). Use
sigma_lower and sigma_upper to get the correct limits. Data
values less or greater than the minimum / maximum values
will have True set in the mask array.
"""
if _filtered_data.size == 0:
return _filtered_data
max_value = self.cenfunc(_filtered_data, axis=axis)
std = self.stdfunc(_filtered_data, axis=axis)
min_value = max_value - std * self.sigma_lower
max_value += std * self.sigma_upper
# Ensure min/max can be broadcast with the data (if arrays):
if axis is not None:
if not isiterable(axis):
axis = (axis,)
# Convert negative indices & restore reduced axes, with length 1:
axis = tuple(_filtered_data.ndim + n if n < 0 else n for n in axis)
mshape = tuple(1 if dim in axis else sz
for dim, sz in enumerate(_filtered_data.shape))
min_value = min_value.reshape(mshape)
max_value = max_value.reshape(mshape)
if max_value is np.ma.masked:
max_value = np.ma.MaskedArray(np.nan, mask=True)
min_value = np.ma.MaskedArray(np.nan, mask=True)
_filtered_data.mask |= _filtered_data > max_value
_filtered_data.mask |= _filtered_data < min_value
return _filtered_data
def __call__(self, data, axis=None, copy=True):
"""
Perform sigma clipping on the provided data.
Parameters
----------
data : array-like
The data to be sigma clipped.
axis : `None` or int or tuple of int, optional
If not `None`, clip along the given axis or axes. For this case,
``axis`` will be passed on to ``cenfunc`` and ``stdfunc``, which
are expected to return an array with the axis dimension(s) removed
(like the numpy functions). If `None`, clip over all axes.
Defaults to `None`.
copy : bool, optional
If `True`, the ``data`` array will be copied. If `False`,
the returned masked array data will contain the same array
as ``data``. Defaults to `True`.
Returns
-------
filtered_data : `numpy.ma.MaskedArray`
A masked array with the same shape as ``data`` input, where
the points rejected by the algorithm have been masked.
"""
if self.sigma_lower is None:
self.sigma_lower = self.sigma
if self.sigma_upper is None:
self.sigma_upper = self.sigma
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically masked.',
AstropyUserWarning)
filtered_data = np.ma.array(data, copy=copy)
if self.iters is None:
lastrej = filtered_data.count() + 1
while filtered_data.count() != lastrej:
lastrej = filtered_data.count()
self._perform_clip(filtered_data, axis=axis)
else:
for i in range(self.iters):
self._perform_clip(filtered_data, axis=axis)
# prevent filtered_data.mask = False (scalar) if no values are clipped
if filtered_data.mask.shape == ():
# make .mask shape match .data shape
filtered_data.mask = False
return filtered_data
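# Illustrative sketch (not part of the class above): the masked array
# returned by ``SigmaClip.__call__`` exposes the rejected points through its
# ``mask`` attribute::
#
#     >>> import numpy as np
#     >>> sigclip = SigmaClip(sigma=3., iters=5)
#     >>> clipped = sigclip(np.random.normal(size=1000))
#     >>> n_rejected = clipped.mask.sum()  # number of clipped points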
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, iters=5,
cenfunc=np.ma.median, stdfunc=np.std, axis=None, copy=True):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting points that are
discrepant by more than a specified number of standard deviations from a
center value. If the data contains invalid values (NaNs or infs),
they are automatically masked before performing the sigma clipping.
For an object-oriented interface to sigma clipping, see
    :class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this function.
Parameters
----------
data : array-like
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. Defaults to 3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
iters : int or `None`, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the
last iteration clips nothing). Defaults to 5.
cenfunc : callable, optional
The function used to compute the center for the clipping. Must
be a callable that takes in a masked array and outputs the
central value. Defaults to the median (`numpy.ma.median`).
stdfunc : callable, optional
The function used to compute the standard deviation about the
center. Must be a callable that takes in a masked array and
outputs a width estimator. Masked (rejected) pixels are those
where::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
where::
deviation = data - cenfunc(data [,axis=int])
Defaults to the standard deviation (`numpy.std`).
axis : `None` or int or tuple of int, optional
If not `None`, clip along the given axis or axes. For this case,
``axis`` will be passed on to ``cenfunc`` and ``stdfunc``, which
are expected to return an array with the axis dimension(s) removed
(like the numpy functions). If `None`, clip over all axes.
Defaults to `None`.
copy : bool, optional
If `True`, the ``data`` array will be copied. If `False`, the
returned masked array data will contain the same array as
``data``. Defaults to `True`.
Returns
-------
filtered_data : `numpy.ma.MaskedArray`
A masked array with the same shape as ``data`` input, where the
points rejected by the algorithm have been masked.
Notes
-----
1. The routine works by calculating::
deviation = data - cenfunc(data [,axis=int])
and then setting a mask for points outside the range::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
It will iterate a given number of times, or until no further
data are rejected.
2. Most numpy functions deal well with masked arrays, but if one
would like to have an array with just the good (or bad) values, one
can use::
good_only = filtered_data.data[~filtered_data.mask]
bad_only = filtered_data.data[filtered_data.mask]
However, for multidimensional data, this flattens the array,
which may not be what one wants (especially if filtering was
done along a subset of the axes).
See Also
--------
SigmaClip
Examples
--------
This example generates random variates from a Gaussian distribution
and returns a masked array in which all points that are more than 2
sample standard deviations from the median are masked::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, iters=5)
This example sigma clips on a similar distribution, but uses 3 sigma
relative to the sample *mean*, clips until convergence, and does not
copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, iters=None,
... cenfunc=mean, copy=False)
This example sigma clips along one axis on a similar distribution
(with bad points inserted)::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be masked, as the
variance is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, iters=iters,
cenfunc=cenfunc, stdfunc=stdfunc)
return sigclip(data, axis=axis, copy=copy)
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, iters=5,
cenfunc=np.ma.median, stdfunc=np.std, std_ddof=0,
axis=None):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use as the lower and upper
clipping limit. These limits are overridden by ``sigma_lower``
and ``sigma_upper``, if input. Defaults to 3.
sigma_lower : float, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is used.
Defaults to `None`.
sigma_upper : float, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is used.
Defaults to `None`.
iters : int, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the
last iteration clips nothing) when calculating the statistics.
Defaults to 5.
cenfunc : callable, optional
The function used to compute the center for the clipping. Must
be a callable that takes in a masked array and outputs the
central value. Defaults to the median (`numpy.ma.median`).
stdfunc : callable, optional
The function used to compute the standard deviation about the
center. Must be a callable that takes in a masked array and
outputs a width estimator. Masked (rejected) pixels are those
where::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
where::
deviation = data - cenfunc(data [,axis=int])
Defaults to the standard deviation (`numpy.std`).
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is zero.
axis : `None` or int or tuple of int, optional
If not `None`, clip along the given axis or axes. For this case,
``axis`` will be passed on to ``cenfunc`` and ``stdfunc``, which
are expected to return an array with the axis dimension(s) removed
(like the numpy functions). If `None`, clip over all axes.
Defaults to `None`.
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
data_clip = sigma_clip(data, sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, iters=iters,
cenfunc=cenfunc, stdfunc=stdfunc, axis=axis)
mean = np.ma.mean(data_clip, axis=axis)
median = np.ma.median(data_clip, axis=axis)
std = np.ma.std(data_clip, ddof=std_ddof, axis=axis)
if axis is None and np.ma.isMaskedArray(median):
# np.ma.median now always return a MaskedArray, even with one
# element. So for compatibility with previous versions of astropy,
# we keep taking the scalar value.
median = median.item()
return mean, median, std
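# Illustrative usage sketch: robust statistics for an image with a known bad
# column; the input mask excludes those pixels before clipping::
#
#     >>> import numpy as np
#     >>> data = np.random.normal(0., 1., (100, 100))
#     >>> badmask = np.zeros(data.shape, dtype=bool)
#     >>> badmask[:, 10] = True  # flag a bad column
#     >>> mean, median, std = sigma_clipped_stats(data, mask=badmask, sigma=3.)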
|
225bec0376e85d5397857f1efa778a30fa8fc1bf127dbd3f44b6fc67d48b6ff4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms
Ported from the astroML project: http://astroML.org/
"""
import numpy as np
from . import bayesian_blocks
__all__ = ['histogram', 'scott_bin_width', 'freedman_bin_width',
'knuth_bin_width']
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specified how bins are computed, the parameters are the same
as ``numpy.histogram()``.
Parameters
----------
a : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None (optional)
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array_like, optional
Not Implemented
other keyword arguments are described in numpy.histogram().
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError("weights are not yet supported "
"for the enhanced histogram")
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
if bins == 'blocks':
bins = bayesian_blocks(a)
elif bins == 'knuth':
da, bins = knuth_bin_width(a, True)
elif bins == 'scott':
da, bins = scott_bin_width(a, True)
elif bins == 'freedman':
da, bins = freedman_bin_width(a, True)
else:
raise ValueError("unrecognized bin code: '{}'".format(bins))
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
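# Illustrative usage sketch: the string bin rules accepted above plug into
# the numpy-compatible interface ('knuth' additionally requires scipy)::
#
#     >>> import numpy as np
#     >>> x = np.random.normal(size=1000)
#     >>> counts, edges = histogram(x, bins='freedman')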
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
           Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
    where :math:`q_{N}` is the :math:`N`-th percentile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
bins = dmin + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
quiet : bool (optional)
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
           arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
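# Illustrative sketch: Knuth's rule adapts the number of bins to structure in
# the data, e.g. for a bimodal sample (requires scipy)::
#
#     >>> import numpy as np
#     >>> x = np.concatenate([np.random.normal(-2, 1, 500),
#     ...                     np.random.normal(2, 1, 500)])
#     >>> dx, edges = knuth_bin_width(x, return_bins=True)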
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
        M : int
            Number of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M) +
self.gammaln(0.5 * M) -
M * self.gammaln(0.5) -
self.gammaln(self.n + 0.5 * M) +
np.sum(self.gammaln(nk + 0.5)))
|
c713820ba9583c904719ad9304a90d35a319d421693556ac89ec4fdf5af49e1f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from functools import partial
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from ..utils.exceptions import AstropyUserWarning
from ..utils.console import human_file_size
from ..utils.decorators import deprecated_renamed_argument
from .. import units as u
from ..nddata import support_nddata
from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
from ..modeling.core import _CompoundModelMeta
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True, mask=None,
preserve_nan=False, normalization_zero_tol=1e-8):
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `numpy.ndarray` or `~astropy.nddata.NDData`
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array (default).
* 'fill'
Set values outside the array boundary to ``fill_value``.
* 'wrap'
                Periodic boundary that wraps to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one prior to
convolving
nan_treatment : 'interpolate', 'fill'
        'interpolate' will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in either the image or the
        kernel.
        'fill' will replace the NaN pixels with a fixed numerical value
        (default zero, see ``fill_value``) prior to convolution.
Note that if the kernel has a sum equal to zero, NaN interpolation
is not possible and will raise an exception
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol: float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
Returns
-------
result : `numpy.ndarray`
        An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
'''
from .boundary_none import (convolve1d_boundary_none,
convolve2d_boundary_none,
convolve3d_boundary_none)
from .boundary_extend import (convolve1d_boundary_extend,
convolve2d_boundary_extend,
convolve3d_boundary_extend)
from .boundary_fill import (convolve1d_boundary_fill,
convolve2d_boundary_fill,
convolve3d_boundary_fill)
from .boundary_wrap import (convolve1d_boundary_wrap,
convolve2d_boundary_wrap,
convolve3d_boundary_wrap)
if boundary not in BOUNDARY_OPTIONS:
raise ValueError("Invalid boundary option: must be one of {0}"
.format(BOUNDARY_OPTIONS))
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# The cython routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Check if kernel is kernel instance
if isinstance(kernel, Kernel):
# Check if array is also kernel instance, if so convolve and
# return new kernel instance
if isinstance(array, Kernel):
if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D):
new_array = convolve1d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel1D(array=new_array)
elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D):
new_array = convolve2d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel2D(array=new_array)
else:
raise Exception("Can't convolve 1D and 2D kernel.")
new_kernel._separable = kernel._separable and array._separable
new_kernel._is_bool = False
return new_kernel
kernel = kernel.array
# Check that the arguments are lists or Numpy arrays
if isinstance(array, list):
array_internal = np.array(array, dtype=float)
array_dtype = array_internal.dtype
elif isinstance(array, np.ndarray):
# Note this won't copy if it doesn't have to -- which is okay
# because none of what follows modifies array_internal.
array_dtype = array.dtype
array_internal = array.astype(float, copy=False)
else:
raise TypeError("array should be a list or a Numpy array")
if isinstance(kernel, list):
kernel_internal = np.array(kernel, dtype=float)
elif isinstance(kernel, np.ndarray):
# Note this always makes a copy, since we will be modifying it
kernel_internal = kernel.astype(float)
else:
raise TypeError("kernel should be a list or a Numpy array")
# Check that the number of dimensions is compatible
if array_internal.ndim != kernel_internal.ndim:
raise Exception('array and kernel have differing number of '
'dimensions.')
# anything that's masked must be turned into NaNs for the interpolation.
# This requires copying the array_internal
array_internal_copied = False
if np.ma.is_masked(array):
array_internal = array_internal.filled(np.nan)
array_internal_copied = True
if mask is not None:
if not array_internal_copied:
array_internal = array_internal.copy()
array_internal_copied = True
# mask != 0 yields a bool mask for all ints/floats/bool
array_internal[mask != 0] = np.nan
if np.ma.is_masked(kernel):
# *kernel* doesn't support NaN interpolation, so instead we just fill it
kernel_internal = kernel.filled(fill_value)
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan:
badvals = np.isnan(array_internal)
if nan_treatment == 'fill':
initially_nan = np.isnan(array_internal)
array_internal[initially_nan] = fill_value
# Because the Cython routines have to normalize the kernel on the fly, we
# explicitly normalize the kernel here, and then scale the image at the
# end if normalization was not requested.
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if (kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero) and normalize_kernel:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
if not kernel_sums_to_zero:
kernel_internal /= kernel_sum
else:
kernel_internal = kernel
renormalize_by_kernel = not kernel_sums_to_zero
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim == 1:
if boundary == 'extend':
result = convolve1d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve1d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve1d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve1d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
elif array_internal.ndim == 2:
if boundary == 'extend':
result = convolve2d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary == 'fill':
result = convolve2d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel,
)
elif boundary == 'wrap':
result = convolve2d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary is None:
result = convolve2d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif array_internal.ndim == 3:
if boundary == 'extend':
result = convolve3d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve3d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve3d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve3d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
else:
raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
'arrays at this time')
# If normalization was not requested, we need to scale the array (since
# the kernel is effectively normalized within the cython functions)
if not normalize_kernel and not kernel_sums_to_zero:
result *= kernel_sum
if preserve_nan:
result[badvals] = np.nan
if nan_treatment == 'fill':
array_internal[initially_nan] = np.nan
# Try to preserve the input type if it's a floating point type
if array_dtype.kind == 'f':
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
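# Illustrative usage sketch: NaN-interpolating convolution with a kernel
# instance (Gaussian1DKernel comes from astropy.convolution)::
#
#     >>> import numpy as np
#     >>> from astropy.convolution import Gaussian1DKernel
#     >>> data = np.array([1., np.nan, 3., 4., 5.])
#     >>> smoothed = convolve(data, Gaussian1DKernel(stddev=1),
#     ...                     boundary='extend')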
@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0')
@support_nddata(data='array')
def convolve_fft(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False, mask=None, crop=True, return_fft=False,
fft_pad=None, psf_pad=None, quiet=False,
min_wt=0.0, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
complex_dtype=complex):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* (optionally) It pads to the nearest 2^n size to improve FFT speed.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.python.org/pypi/pyFFTW>`_ or
`pyFFTW3 <https://pypi.python.org/pypi/PyFFTW3/0.2.1>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
      `numpy.fft.fftn` and `numpy.fft.ifftn`.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution
fill_value : float, optional
The value to use outside the array when using boundary='fill'
nan_treatment : 'interpolate', 'fill'
``interpolate`` will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in either the image or the
        kernel. ``fill`` will replace the NaN pixels with a fixed
numerical value (default zero, see ``fill_value``) prior to
convolution. Note that if the kernel has a sum equal to zero, NaN
interpolation is not possible and will raise an exception.
normalize_kernel : function or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
normalization_zero_tol: float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
Other Parameters
----------------
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
        instead of ``NaN`` (which they would be otherwise, because 0/0 = nan).
See the examples below
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fftn, ifftn : functions, optional
The fft and inverse fft functions. Can be overridden to use your own
ffts, e.g. an fftw3 wrapper or scipy's fftn,
        ``fftn=scipy.fftpack.fftn``
complex_dtype : numpy.complex, optional
Which complex dtype to use. `numpy` has a range of options, from 64 to
256.
quiet : bool, optional
Silence warning message about NaN interpolation
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB
Raises
------
ValueError:
If the array is bigger than 1 GB after padding, will raise this exception
unless ``allow_huge`` is True
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data can become
very large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, 0, 3], [0, 1, 0])
array([ 1., 0., 3.])
>>> convolve_fft([1, 2, 3], [1])
array([ 1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
...
array([ 1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([ 1., 2., 3.])
>>> import scipy.fftpack # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft)
array([ 1., 2., 3.])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError("Can't convolve two kernels with convolve_fft. "
"Use convolve instead.")
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = np.asarray(array, dtype=complex)
kernel = np.asarray(kernel, dtype=complex)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (np.product(arrayshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_B.to_value(u.byte))))
# mask catching - masks must be turned into NaNs for use later in the image
if np.ma.is_masked(array):
mamask = array.mask
array = np.array(array)
array[mamask] = np.nan
elif mask is not None:
# copying here because we have to mask it below. But no need to copy
# if mask is None because we won't modify it.
array = np.array(array)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
array[mask != 0] = np.nan
# the *kernel* doesn't support NaN interpolation, so instead we just fill it
if np.ma.is_masked(kernel):
kernel = kernel.filled(0)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1. / MAX_NORMALIZATION:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == 'interpolate':
raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel')
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn("The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn("psf_pad was set to {0}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented "
"for fft-based convolution")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
if fft_pad: # default=True
if psf_pad: # default=False
# add the dimensions and then take the max (bigger)
fsize = 2 ** np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
# add the shape lists (max of a list of length 4) (smaller)
# also makes the shapes square
fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape)))
newshape = np.array([fsize for ii in range(array.ndim)], dtype=int)
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape) + np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
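    # Worked example for the four cases above, with arrayshape=(100,) and
    # kernshape=(15,):
    #   fft_pad and psf_pad  -> fsize = 2**ceil(log2(115)) = 128
    #   fft_pad only         -> fsize = 2**ceil(log2(100)) = 128
    #   psf_pad only         -> newshape = (115,)
    #   neither              -> newshape = (100,)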
# perform a second check after padding
array_size_C = (np.product(newshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_C)))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [slice(center - arraydimsize // 2,
center + (arraydimsize + 1) // 2)]
kernslices += [slice(center - kerndimsize // 2,
center + (kerndimsize + 1) // 2)]
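    # Worked example: newdimsize=128, arraydimsize=100 -> center=64, so
    # arrayslices gets slice(14, 114), i.e. 100 pixels centred in the
    # padded array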
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = (nan_treatment == 'interpolate')
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
# restore NaNs in original image (they were modified inplace earlier)
# We don't have to worry about masked arrays - if input was masked, it was
# copied
array[nanmaskarray] = np.nan
kernel[nanmaskkernel] = np.nan
fftmult *= kernel_scale
if return_fft:
return fftmult
if interpolate_nan:
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(array, kernel, nan_treatment='interpolate',
normalize_kernel=True, **kwargs)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
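# A minimal usage sketch for ``interpolate_replace_nans`` (illustrative only;
# the data are random, so exact output values are not shown):
#
#     >>> import numpy as np
#     >>> from astropy.convolution import Gaussian2DKernel
#     >>> img = np.random.rand(8, 8)
#     >>> img[3, 4] = np.nan
#     >>> fixed = interpolate_replace_nans(img, Gaussian2DKernel(2))
#     >>> bool(np.isnan(fixed).any())
#     False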
def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : CompoundModel
Convolved model
"""
if mode == 'convolve_fft':
BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs))
elif mode == 'convolve':
BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs))
else:
raise ValueError('Mode {} is not supported.'.format(mode))
return _CompoundModelMeta._from_operator(mode, model, kernel)
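# A hedged usage sketch for ``convolve_models`` (parameter values are
# arbitrary; the returned CompoundModel is evaluated like any other model):
#
#     >>> from astropy.modeling import models
#     >>> g1 = models.Gaussian1D(amplitude=1, mean=0, stddev=1)
#     >>> g2 = models.Gaussian1D(amplitude=1, mean=0, stddev=2)
#     >>> conv = convolve_models(g1, g2, mode='convolve_fft')
#     >>> y = conv(np.linspace(-5, 5, 101))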
|
74d4ad95e83d6cc46dd86d416934e3db66196942ad62b9302f2764410e5e02d2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers", are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from ..units import Quantity
from ..utils.exceptions import AstropyUserWarning
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
# Check pkg_resources exists
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
                # input units (to make sure that initial guesses on the
                # parameters are in the right unit system)
model = model.without_units_for_data(x=x, y=y, z=z)
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(y, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(x=x, y=y, z=z)
return model_new
else:
raise NotImplementedError("This model does not support being fit to data with units")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
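# Sketch of what the decorator enables: fitting data that carry units with a
# fitter that only handles plain arrays. Values are arbitrary and this assumes
# a model that supports unit fitting, such as Gaussian1D:
#
#     >>> import astropy.units as u
#     >>> from astropy.modeling import models, fitting
#     >>> x = np.linspace(0, 10, 50) * u.um
#     >>> y = 3 * np.exp(-0.5 * (x - 5 * u.um)**2 / (1 * u.um)**2) * u.Jy
#     >>> g_init = models.Gaussian1D(amplitude=1 * u.Jy, mean=4 * u.um,
#     ...                            stddev=2 * u.um)
#     >>> g_fit = fitting.LevMarLSQFitter()(g_init, x, y)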
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
_fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like (optional)
Input coordinates.
If the dependent (``y`` or ``z``) co-ordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
co-ordinate grids differ.
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
_, fitparam_indices = _model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x)
else:
lhs = model_copy.fit_deriv(x, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices, x=x, y=y)
else:
lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
else:
rhs = z.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if lhs.ndim > 2:
raise ValueError('{0} gives unsupported >2D derivative matrix for '
'this x/y'.format(type(model_copy).__name__))
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input co-ordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(x) != len(weights):
raise ValueError("x and weights should have the same length")
if rhs.ndim == 2:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original dependent
# variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
if rcond is None:
rcond = len(x) * np.finfo(x.dtype).eps
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if len(model_copy) == 1 or not masked:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
else:
# Where fitting multiple models with masked pixels, initialize an
# empty array of coefficients and populate it one model at a time.
# The shape matches the number of coefficients from the Vandermonde
# matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[-1:] + rhs.shape[-1:], dtype=rhs.dtype)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_rhs, model_lacoef in zip(rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask
model_lhs = lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef = (lacoef.T / scl).T
self.fit_info['params'] = lacoef
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if hasattr(model_copy, '_order') and rank != model_copy._order:
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
_fitter_to_model_params(model_copy, lacoef.flatten())
return model_copy
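# Minimal usage sketch for LinearLSQFitter (synthetic, noise-free data, so
# the fit recovers the generating coefficients):
#
#     >>> from astropy.modeling import models, fitting
#     >>> x = np.linspace(0., 1., 20)
#     >>> y = 2. * x + 1.
#     >>> fit = fitting.LinearLSQFitter()
#     >>> line = fit(models.Polynomial1D(degree=1), x, y)
#     >>> line(0.5)   # ~2.0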
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a number of iterations ``niter``, outliers are removed
and fitting is performed for each iteration.
Parameters
----------
fitter : An Astropy fitter
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : function
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int (optional)
Number of iterations.
outlier_kwargs : dict (optional)
Keyword arguments for outlier_func.
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
def __str__(self):
return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
("\nOutlier func. args.: {3}"))\
            .format(self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __repr__(self):
return ("{0}(fitter: {1}, outlier_func: {2}," +
" niter: {3}, outlier_kwargs: {4})")\
.format(self.__class__.__name__,
self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like (optional)
Data measurements (2D case).
weights : array-like (optional)
Weights to be passed to the fitter.
kwargs : dict (optional)
Keyword arguments to be passed to the fitter.
Returns
-------
filtered_data : numpy.ma.core.MaskedArray
Data used to perform the fitting after outlier removal.
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behaviour and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y co-ordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if not hasattr(self.fitter, 'supports_masked_input') or \
self.fitter.supports_masked_input is not True:
raise ValueError("{0} cannot fit model sets with masked "
"values".format(type(self.fitter).__name__))
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input co-ordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = x,
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if 'axis' not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs['axis'] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = data
filtered_weights = weights
# Perform the iterative fitting:
# TO DO: add a stopping criterion when results aren't changing?
for n in range(self.niter):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop('axis', None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask,
model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
model_vals_T):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn('outlier_func did not accept axis argument; '
'reverted to slow loop over models.',
AstropyUserWarning)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights, **kwargs)
else:
fitted_model = self.fitter(fitted_model, *coords,
filtered_data,
weights=filtered_weights, **kwargs)
return filtered_data, fitted_model
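# Sketch combining sigma clipping with a linear fit; extra keywords such as
# ``sigma`` are forwarded to the outlier function, and x/y are assumed to be
# 1-d data arrays. Note this version returns the data first, then the model:
#
#     >>> from astropy.stats import sigma_clip
#     >>> from astropy.modeling import models, fitting
#     >>> fitter = fitting.FittingWithOutlierRemoval(
#     ...     fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0)
#     >>> filtered_data, fitted_line = fitter(models.Polynomial1D(1), x, y)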
class LevMarLSQFitter(metaclass=_FitterMeta):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self):
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
_fitter_to_model_params(model, fps)
meas = args[-1]
if weights is None:
return np.ravel(model(*args[2: -1]) - meas)
else:
return np.ravel(weights * (model(*args[2: -1]) - meas))
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
from scipy import optimize
model_copy = _validate_model(model, self.supported_constraints)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
if model_copy.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _ = _model_to_fit_params(model_copy)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
_fitter_to_model_params(model_copy, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
return model_copy
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
_fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in (
np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
else:
return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
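# Typical non-linear fit with LevMarLSQFitter (a sketch with synthetic,
# noise-free data; 'param_cov' has one row/column per free parameter):
#
#     >>> from astropy.modeling import models, fitting
#     >>> x = np.linspace(-5., 5., 100)
#     >>> y = 3. * np.exp(-0.5 * (x - 1.0)**2 / 0.8**2)
#     >>> fitter = fitting.LevMarLSQFitter()
#     >>> g = fitter(models.Gaussian1D(amplitude=1., mean=0., stddev=1.),
#     ...            x, y)
#     >>> fitter.fit_info['param_cov'].shape
#     (3, 3)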
class SLSQPLSQFitter(Fitter):
"""
SLSQP optimization algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self._model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def _model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = [p.flatten() for p in model.parameters]
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters - result of one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError("Expected >1 models, {} is given".format(
len(self.models)))
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
"{} is given".format(len(self.jointparams.keys())))
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError("{} parameter(s) provided but {} expected".format(
len(self.jointparams[j]), len(self.initvals)))
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError("Expected {} coordinates in args but {} provided"
.format(reduce(lambda x, y: x + 1 + y + 1,
self.modeldims), len(args)))
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
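# Sketch: fit two Gaussians while forcing them to share an amplitude; 9.8 is
# the initial guess for the joint parameter, and x1/y1, x2/y2 are assumed
# 1-d data arrays:
#
#     >>> from astropy.modeling import models
#     >>> g1 = models.Gaussian1D(10., mean=14.9, stddev=0.3)
#     >>> g2 = models.Gaussian1D(10., mean=13.0, stddev=0.4)
#     >>> jf = JointFitter([g1, g2],
#     ...                  {g1: ['amplitude'], g2: ['amplitude']}, [9.8])
#     >>> jf(x1, y1, x2, y2)   # updates g1 and g2 in place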
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y array is expected to equal "
"the number of parameter sets)")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
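# Illustration of the model-set reshaping (assumed shapes): for a 1-D model
# set, the dependent variable's model axis is rolled to be last so that each
# model occupies a column, as np.linalg.lstsq expects:
#
#     >>> x = np.arange(10.)
#     >>> y = np.ones((2, 10))                    # two models along axis 0
#     >>> _convert_input(x, y, n_models=2)[1].shape
#     (10, 2)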
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
"""
_, fit_param_indices = _model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None):
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
model.parameters[slice_] = values
offset += size
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
model.parameters[slice_] = value
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
else:
return (model.parameters, fitparam_indices)
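# Illustration (sketch): a fixed parameter is dropped from the fitted array
# and from the index list (Gaussian1D parameter order is amplitude, mean,
# stddev):
#
#     >>> from astropy.modeling import models
#     >>> g = models.Gaussian1D(1., 0., 1., fixed={'mean': True})
#     >>> _model_to_fit_params(g)
#     (array([ 1.,  1.]), [0, 2])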
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requirement
of it being merged into astropy's core.
Parameters
----------
entry_points : a list of `~pkg_resources.EntryPoint`
entry_points are objects which encapsulate
importable objects and are defined on the
installation of a package.
Notes
-----
    An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning('{type} error occurred in entry '
'point {name}.' .format(type=type(e).__name__, name=name)))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to be a '
'Class.' .format(name)))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to extend '
'astropy.modeling.Fitter' .format(name)))
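# A third-party package can expose a fitter through an entry point in its
# setup.py; a hypothetical declaration (all names here are placeholders):
#
#     entry_points={
#         'astropy.modeling': [
#             'PluginFitter = plugin_pkg.fitters:PluginFitter',
#         ],
#     }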
# this is so fitting doesn't choke if pkg_resources doesn't exist
if HAS_PKG:
populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
|
61937d37f580fb2fe88ba082253e21f8615e9f54b83fa9aceabd047b47a93773 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines two classes that deal with parameters.
It is unlikely users will need to work with these classes directly, unless they
define their own models.
"""
import functools
import numbers
import types
import operator
import numpy as np
from .. import units as u
from ..units import Quantity, UnitsError
from ..utils import isiterable, OrderedDescriptor
from .utils import array_repr_oneline
from .utils import get_inputs_and_params
__all__ = ['Parameter', 'InputParameterError', 'ParameterError']
class ParameterError(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
class InputParameterError(ValueError, ParameterError):
"""Used for incorrect input parameter values and definitions."""
class ParameterDefinitionError(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array"""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
"Parameter of {0} could not be converted to "
"float".format(type(value)))
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
    elif isinstance(value, bool):
        # bool is a subclass of int, so it must be rejected before the
        # generic numbers.Number check (which would otherwise match it)
        raise InputParameterError(
            "Expected parameter to be of numerical type, not boolean")
    elif isinstance(value, (numbers.Number, np.number)):
        value = float(value)
else:
raise InputParameterError(
"Don't know how to convert parameter of {0} to "
"float".format(type(value)))
return value
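# Examples of the conversions performed (sketch):
#
#     >>> _tofloat(3)
#     3.0
#     >>> _tofloat([1, 2])
#     array([ 1.,  2.])
#     >>> _tofloat(True)           # raises InputParameterError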
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self._model is None:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self._model is None:
if op is operator.lt:
# Because OrderedDescriptor uses __lt__ to work, we need to
# call the super method, but only when not bound to an instance
# anyways
return super(self.__class__, self).__lt__(val)
else:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self._model is None:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
class Parameter(OrderedDescriptor):
"""
Wraps individual parameters.
This class represents a model's parameter (in a somewhat broad sense). It
acts as both a descriptor that can be assigned to a class attribute to
describe the parameters accepted by an individual model (this is called an
"unbound parameter"), or it can act as a proxy for the parameter values on
an individual model instance (called a "bound parameter").
Parameter instances never store the actual value of the parameter directly.
    Rather, each instance of a model stores its own parameter values
in an array. A *bound* Parameter simply wraps the value in a Parameter
proxy which provides some additional information about the parameter such
as its constraints. In other words, this is a high-level interface to a
model's adjustable parameter values.
*Unbound* Parameters are not associated with any specific model instance,
and are merely used by model classes to determine the names of their
parameters and other information about each parameter such as their default
values and default constraints.
See :ref:`modeling-parameters` for more details.
Parameters
----------
name : str
parameter name
.. warning::
The fact that `Parameter` accepts ``name`` as an argument is an
implementation detail, and should not be used directly. When
defining a new `Model` class, parameter names are always
automatically defined by the class attribute they're assigned to.
description : str
parameter description
default : float or array
default value to use for this parameter
unit : `~astropy.units.Unit`
if specified, the parameter will be in these units, and when the
parameter is updated in future, it should be set to a
:class:`~astropy.units.Quantity` that has equivalent units.
getter : callable
a function that wraps the raw (internal) value of the parameter
when returning the value through the parameter proxy (eg. a
parameter may be stored internally as radians but returned to the
user as degrees)
setter : callable
a function that wraps any values assigned to this parameter; should
be the inverse of getter
fixed : bool
if True the parameter is not varied during fitting
tied : callable or False
if callable is supplied it provides a way to link the value of this
parameter to another parameter (or some other arbitrary function)
min : float
the lower bound of a parameter
max : float
the upper bound of a parameter
bounds : tuple
specify min and max as a single tuple--bounds may not be specified
simultaneously with min or max
model : `Model` instance
        binds the `Parameter` instance to a specific model upon
instantiation; this should only be used internally for creating bound
Parameters, and should not be used for `Parameter` descriptors defined
as class attributes
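    Examples
    --------
    A minimal illustration of declaring and using parameters (the ``Line``
    model below is hypothetical, for demonstration only)::

        >>> from astropy.modeling import Fittable1DModel, Parameter
        >>> class Line(Fittable1DModel):
        ...     slope = Parameter(default=1)
        ...     intercept = Parameter(default=0)
        ...
        ...     @staticmethod
        ...     def evaluate(x, slope, intercept):
        ...         return slope * x + intercept
        ...
        >>> m = Line(slope=2, intercept=1)
        >>> m.slope.value
        2.0
        >>> m.intercept
        Parameter('intercept', value=1.0)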
"""
constraints = ('fixed', 'tied', 'bounds')
"""
Types of constraints a parameter can have. Excludes 'min' and 'max'
which are just aliases for the first and second elements of the 'bounds'
constraint (which is represented as a 2-tuple).
"""
# Settings for OrderedDescriptor
_class_attribute_ = '_parameters_'
_name_attribute_ = '_name'
def __init__(self, name='', description='', default=None, unit=None,
getter=None, setter=None, fixed=False, tied=False, min=None,
max=None, bounds=None, model=None):
super().__init__()
self._name = name
self.__doc__ = self._description = description.strip()
# We only need to perform this check on unbound parameters
if model is None and isinstance(default, Quantity):
if unit is not None and not unit.is_equivalent(default.unit):
raise ParameterDefinitionError(
"parameter default {0} does not have units equivalent to "
"the required unit {1}".format(default, unit))
unit = default.unit
default = default.value
self._default = default
self._unit = unit
# NOTE: These are *default* constraints--on model instances constraints
# are taken from the model if set, otherwise the defaults set here are
# used
if bounds is not None:
if min is not None or max is not None:
raise ValueError(
                    'bounds may not be specified simultaneously with min or '
                    'max when instantiating Parameter {0}'.format(name))
else:
bounds = (min, max)
self._fixed = fixed
self._tied = tied
self._bounds = bounds
self._order = None
self._model = None
# The getter/setter functions take one or two arguments: The first
# argument is always the value itself (either the value returned or the
# value being set). The second argument is optional, but if present
# will contain a reference to the model object tied to a parameter (if
# it exists)
self._getter = self._create_value_wrapper(getter, None)
self._setter = self._create_value_wrapper(setter, None)
self._validator = None
        # Only Parameters declared as class-level descriptors require
        # an ordering ID
if model is not None:
self._bind(model)
def __get__(self, obj, objtype):
if obj is None:
return self
# All of the Parameter.__init__ work should already have been done for
# the class-level descriptor; we can skip that stuff and just copy the
# existing __dict__ and then bind to the model instance
parameter = self.__class__.__new__(self.__class__)
parameter.__dict__.update(self.__dict__)
parameter._bind(obj)
return parameter
def __set__(self, obj, value):
value = _tofloat(value)
# Check that units are compatible with default or units already set
param_unit = obj._param_metrics[self.name]['orig_unit']
if param_unit is None:
if isinstance(value, Quantity):
obj._param_metrics[self.name]['orig_unit'] = value.unit
else:
if not isinstance(value, Quantity):
raise UnitsError("The '{0}' parameter should be given as a "
"Quantity because it was originally initialized "
"as a Quantity".format(self._name))
else:
# We need to make sure we update the unit because the units are
# then dropped from the value below.
obj._param_metrics[self.name]['orig_unit'] = value.unit
# Call the validator before the setter
if self._validator is not None:
self._validator(obj, value)
if self._setter is not None:
setter = self._create_value_wrapper(self._setter, obj)
if self.unit is not None:
value = setter(value * self.unit).value
else:
value = setter(value)
self._set_model_value(obj, value)
def __len__(self):
if self._model is None:
raise TypeError('Parameter definitions do not have a length.')
return len(self._model)
def __getitem__(self, key):
value = self.value
if len(self._model) == 1:
# Wrap the value in a list so that getitem can work for sensible
# indices like [0] and [-1]
value = [value]
return value[key]
def __setitem__(self, key, value):
# Get the existing value and check whether it even makes sense to
# apply this index
oldvalue = self.value
n_models = len(self._model)
# if n_models == 1:
# # Convert the single-dimension value to a list to allow some slices
# # that would be compatible with a length-1 array like [:] and [0:]
# oldvalue = [oldvalue]
if isinstance(key, slice):
if len(oldvalue[key]) == 0:
raise InputParameterError(
"Slice assignment outside the parameter dimensions for "
"'{0}'".format(self.name))
for idx, val in zip(range(*key.indices(len(self))), value):
self.__setitem__(idx, val)
else:
try:
oldvalue[key] = value
except IndexError:
raise InputParameterError(
"Input dimension {0} invalid for {1!r} parameter with "
"dimension {2}".format(key, self.name, n_models))
def __repr__(self):
args = "'{0}'".format(self._name)
if self._model is None:
if self._default is not None:
args += ', default={0}'.format(self._default)
else:
args += ', value={0}'.format(self.value)
if self.unit is not None:
args += ', unit={0}'.format(self.unit)
for cons in self.constraints:
val = getattr(self, cons)
if val not in (None, False, (None, None)):
# Maybe non-obvious, but False is the default for the fixed and
# tied constraints
args += ', {0}={1}'.format(cons, val)
return "{0}({1})".format(self.__class__.__name__, args)
@property
def name(self):
"""Parameter name"""
return self._name
@property
def default(self):
"""Parameter default value"""
if (self._model is None or self._default is None or
len(self._model) == 1):
return self._default
        # Otherwise the model we are providing for has more than one parameter
        # set, so ensure that the default is repeated the correct number of
        # times along the model_set_axis if necessary
n_models = len(self._model)
model_set_axis = self._model._model_set_axis
default = self._default
new_shape = (np.shape(default) +
(1,) * (model_set_axis + 1 - np.ndim(default)))
default = np.reshape(default, new_shape)
# Now roll the new axis into its correct position if necessary
default = np.rollaxis(default, -1, model_set_axis)
# Finally repeat the last newly-added axis to match n_models
default = np.repeat(default, n_models, axis=-1)
# NOTE: Regardless of what order the last two steps are performed in,
# the resulting array will *look* the same, but only if the repeat is
# performed last will it result in a *contiguous* array
return default
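    # E.g. (a sketch): for a model set with ``n_models=3``, ``model_set_axis=0``
    # and a scalar default of 1.0, the reshape/rollaxis/repeat steps above
    # return ``array([1., 1., 1.])``--one copy of the default per model.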
@property
def value(self):
"""The unadorned value proxied by this parameter."""
if self._model is None:
raise AttributeError('Parameter definition does not have a value')
value = self._get_model_value(self._model)
if self._getter is None:
return value
else:
raw_unit = self._model._param_metrics[self.name]['raw_unit']
orig_unit = self._model._param_metrics[self.name]['orig_unit']
if raw_unit is not None:
return np.float64(self._getter(value, raw_unit, orig_unit).value)
else:
return self._getter(value)
@value.setter
def value(self, value):
if self._model is None:
raise AttributeError('Cannot set a value on a parameter '
'definition')
if self._setter is not None:
val = self._setter(value)
if isinstance(value, Quantity):
raise TypeError("The .value property on parameters should be set to "
"unitless values, not Quantity objects. To set a "
"parameter to a quantity simply set the parameter "
"directly without using .value")
self._set_model_value(self._model, value)
@property
def unit(self):
"""
The unit attached to this parameter, if any.
On unbound parameters (i.e. parameters accessed through the
model class, rather than a model instance) this is the required/
default unit for the parameter.
"""
if self._model is None:
return self._unit
else:
# orig_unit may be undefined early on in model instantiation
return self._model._param_metrics[self.name].get('orig_unit',
self._unit)
@unit.setter
def unit(self, unit):
self._set_unit(unit)
def _set_unit(self, unit, force=False):
if self._model is None:
raise AttributeError('Cannot set unit on a parameter definition')
orig_unit = self._model._param_metrics[self.name]['orig_unit']
if force:
self._model._param_metrics[self.name]['orig_unit'] = unit
else:
if orig_unit is None:
raise ValueError('Cannot attach units to parameters that were '
'not initially specified with units')
else:
raise ValueError('Cannot change the unit attribute directly, '
'instead change the parameter to a new quantity')
@property
def quantity(self):
"""
This parameter, as a :class:`~astropy.units.Quantity` instance.
"""
if self.unit is not None:
return self.value * self.unit
else:
return None
@quantity.setter
def quantity(self, quantity):
if not isinstance(quantity, Quantity):
raise TypeError("The .quantity attribute should be set to a Quantity object")
self.value = quantity.value
self._set_unit(quantity.unit, force=True)
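    # Sketch of the two assignment paths (assumes ``import astropy.units as
    # u``): ``m.amplitude.quantity = 2 * u.Jy`` stores the value 2.0 and
    # forces the unit to Jy, whereas assigning a Quantity to
    # ``m.amplitude.value`` raises TypeError (see the value setter above).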
@property
def shape(self):
"""The shape of this parameter's value array."""
if self._model is None:
raise AttributeError('Parameter definition does not have a '
'shape.')
shape = self._model._param_metrics[self._name]['shape']
if len(self._model) > 1:
# If we are dealing with a model *set* the shape is the shape of
# the parameter within a single model in the set
model_axis = self._model._model_set_axis
if model_axis < 0:
model_axis = len(shape) + model_axis
shape = shape[:model_axis] + shape[model_axis + 1:]
else:
# When a model set is initialized, the dimension of the parameters
# is increased by model_set_axis+1. To find the shape of a parameter
# within a single model the extra dimensions need to be removed first.
# The following dimension shows the number of models.
# The rest of the shape tuple represents the shape of the parameter
# in a single model.
shape = shape[model_axis + 1:]
return shape
@property
def size(self):
"""The size of this parameter's value array."""
# TODO: Rather than using self.value this could be determined from the
# size of the parameter in _param_metrics
return np.size(self.value)
@property
def fixed(self):
"""
Boolean indicating if the parameter is kept fixed during fitting.
"""
if self._model is not None:
fixed = self._model._constraints['fixed']
return fixed.get(self._name, self._fixed)
else:
return self._fixed
@fixed.setter
def fixed(self, value):
"""Fix a parameter"""
if self._model is not None:
if not isinstance(value, bool):
raise TypeError("Fixed can be True or False")
self._model._constraints['fixed'][self._name] = value
else:
raise AttributeError("can't set attribute 'fixed' on Parameter "
"definition")
@property
def tied(self):
"""
Indicates that this parameter is linked to another one.
A callable which provides the relationship of the two parameters.
"""
if self._model is not None:
tied = self._model._constraints['tied']
return tied.get(self._name, self._tied)
else:
return self._tied
@tied.setter
def tied(self, value):
"""Tie a parameter"""
if self._model is not None:
if not callable(value) and value not in (False, None):
raise TypeError("Tied must be a callable")
self._model._constraints['tied'][self._name] = value
else:
raise AttributeError("can't set attribute 'tied' on Parameter "
"definition")
@property
def bounds(self):
"""The minimum and maximum values of a parameter as a tuple"""
if self._model is not None:
bounds = self._model._constraints['bounds']
return bounds.get(self._name, self._bounds)
else:
return self._bounds
@bounds.setter
def bounds(self, value):
"""Set the minimum and maximum values of a parameter from a tuple"""
if self._model is not None:
_min, _max = value
if _min is not None:
if not isinstance(_min, numbers.Number):
raise TypeError("Min value must be a number")
_min = float(_min)
if _max is not None:
if not isinstance(_max, numbers.Number):
raise TypeError("Max value must be a number")
_max = float(_max)
bounds = self._model._constraints.setdefault('bounds', {})
self._model._constraints['bounds'][self._name] = (_min, _max)
else:
raise AttributeError("can't set attribute 'bounds' on Parameter "
"definition")
@property
def min(self):
"""A value used as a lower bound when fitting a parameter"""
return self.bounds[0]
@min.setter
def min(self, value):
"""Set a minimum value of a parameter"""
if self._model is not None:
self.bounds = (value, self.max)
else:
raise AttributeError("can't set attribute 'min' on Parameter "
"definition")
@property
def max(self):
"""A value used as an upper bound when fitting a parameter"""
return self.bounds[1]
@max.setter
def max(self, value):
"""Set a maximum value of a parameter."""
if self._model is not None:
self.bounds = (self.min, value)
else:
raise AttributeError("can't set attribute 'max' on Parameter "
"definition")
@property
def validator(self):
"""
Used as a decorator to set the validator method for a `Parameter`.
The validator method validates any value set for that parameter.
It takes two arguments--``self``, which refers to the `Model`
instance (remember, this is a method defined on a `Model`), and
the value being set for this parameter. The validator method's
return value is ignored, but it may raise an exception if the value
set on the parameter is invalid (typically an `InputParameterError`
should be raised, though this is not currently a requirement).
The decorator *returns* the `Parameter` instance that the validator
is set on, so the underlying validator method should have the same
name as the `Parameter` itself (think of this as analogous to
``property.setter``). For example::
>>> from astropy.modeling import Fittable1DModel
>>> class TestModel(Fittable1DModel):
... a = Parameter()
... b = Parameter()
...
... @a.validator
... def a(self, value):
... # Remember, the value can be an array
... if np.any(value < self.b):
... raise InputParameterError(
... "parameter 'a' must be greater than or equal "
... "to parameter 'b'")
...
... @staticmethod
... def evaluate(x, a, b):
... return a * x + b
...
>>> m = TestModel(a=1, b=2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
>>> m = TestModel(a=2, b=2)
>>> m.a = 0 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
        On bound parameters this property returns the validator method itself,
        as a bound method on the `Parameter`. This is often less useful, but
        it allows validating a parameter value without setting that parameter::
>>> m.a.validator(42) # Passes
>>> m.a.validator(-42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
"""
if self._model is None:
# For unbound parameters return the validator setter
def validator(func, self=self):
self._validator = func
return self
return validator
else:
# Return the validator method, bound to the Parameter instance with
# the name "validator"
def validator(self, value):
if self._validator is not None:
return self._validator(self._model, value)
return types.MethodType(validator, self)
def copy(self, name=None, description=None, default=None, unit=None,
getter=None, setter=None, fixed=False, tied=False, min=None,
max=None, bounds=None):
"""
Make a copy of this `Parameter`, overriding any of its core attributes
in the process (or an exact copy).
The arguments to this method are the same as those for the `Parameter`
initializer. This simply returns a new `Parameter` instance with any
or all of the attributes overridden, and so returns the equivalent of:
.. code:: python
Parameter(self.name, self.description, ...)
"""
kwargs = locals().copy()
del kwargs['self']
for key, value in kwargs.items():
if value is None:
                # Annoying special cases for min/max, which are just aliases
                # for the components of bounds
if key in ('min', 'max'):
continue
else:
if hasattr(self, key):
value = getattr(self, key)
elif hasattr(self, '_' + key):
value = getattr(self, '_' + key)
kwargs[key] = value
return self.__class__(**kwargs)
@property
def _raw_value(self):
"""
Currently for internal use only.
Like Parameter.value but does not pass the result through
Parameter.getter. By design this should only be used from bound
parameters.
        This will probably be removed or reworked at some point in the
process of rethinking how parameter values are stored/updated.
"""
return self._get_model_value(self._model)
def _bind(self, model):
"""
Bind the `Parameter` to a specific `Model` instance; don't use this
directly on *unbound* parameters, i.e. `Parameter` descriptors that
are defined in class bodies.
"""
self._model = model
self._getter = self._create_value_wrapper(self._getter, model)
self._setter = self._create_value_wrapper(self._setter, model)
# TODO: These methods should probably be moved to the Model class, since it
# has entirely to do with details of how the model stores parameters.
# Parameter should just act as a user front-end to this.
def _get_model_value(self, model):
"""
This method implements how to retrieve the value of this parameter from
the model instance. See also `Parameter._set_model_value`.
These methods take an explicit model argument rather than using
self._model so that they can be used from unbound `Parameter`
instances.
"""
if not hasattr(model, '_parameters'):
# The _parameters array hasn't been initialized yet; just translate
# this to an AttributeError
raise AttributeError(self._name)
# Use the _param_metrics to extract the parameter value from the
# _parameters array
param_metrics = model._param_metrics[self._name]
param_slice = param_metrics['slice']
param_shape = param_metrics['shape']
value = model._parameters[param_slice]
if param_shape:
value = value.reshape(param_shape)
else:
value = value[0]
return value
def _set_model_value(self, model, value):
"""
This method implements how to store the value of a parameter on the
model instance.
Currently there is only one storage mechanism (via the ._parameters
        array) but other mechanisms may be desirable, in which case the
model class itself should dictate this and *not* `Parameter` itself.
"""
def _update_parameter_value(model, name, value):
# TODO: Maybe handle exception on invalid input shape
param_metrics = model._param_metrics[name]
param_slice = param_metrics['slice']
param_shape = param_metrics['shape']
param_size = np.prod(param_shape)
if np.size(value) != param_size:
raise InputParameterError(
"Input value for parameter {0!r} does not have {1} elements "
"as the current value does".format(name, param_size))
model._parameters[param_slice] = np.array(value).ravel()
_update_parameter_value(model, self._name, value)
if hasattr(model, "_param_map"):
submodel_ind, param_name = model._param_map[self._name]
if hasattr(model._submodels[submodel_ind], "_param_metrics"):
_update_parameter_value(model._submodels[submodel_ind], param_name, value)
@staticmethod
def _create_value_wrapper(wrapper, model):
"""Wraps a getter/setter function to support optionally passing in
a reference to the model object as the second argument.
If a model is tied to this parameter and its getter/setter supports
a second argument then this creates a partial function using the model
instance as the second argument.
"""
if isinstance(wrapper, np.ufunc):
if wrapper.nin != 1:
raise TypeError("A numpy.ufunc used for Parameter "
"getter/setter may only take one input "
"argument")
elif wrapper is None:
# Just allow non-wrappers to fall through silently, for convenience
return None
else:
inputs, params = get_inputs_and_params(wrapper)
nargs = len(inputs)
if nargs == 1:
pass
elif nargs == 2:
if model is not None:
# Don't make a partial function unless we're tied to a
# specific model instance
model_arg = inputs[1].name
wrapper = functools.partial(wrapper, **{model_arg: model})
else:
raise TypeError("Parameter getter/setter must be a function "
"of either one or two arguments")
return wrapper
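    # Illustrative sketch: a one-argument wrapper such as ``np.deg2rad`` (a
    # unary ufunc) is returned unchanged, while a hypothetical two-argument
    # wrapper ``def getter(value, model): ...`` is bound to the model through
    # ``functools.partial`` once the parameter is attached to a model instance.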
def __array__(self, dtype=None):
# Make np.asarray(self) work a little more straightforwardly
arr = np.asarray(self.value, dtype=dtype)
if self.unit is not None:
arr = Quantity(arr, self.unit, copy=False)
return arr
def __bool__(self):
if self._model is None:
return True
else:
return bool(self.value)
__add__ = _binary_arithmetic_operation(operator.add)
__radd__ = _binary_arithmetic_operation(operator.add, reflected=True)
__sub__ = _binary_arithmetic_operation(operator.sub)
__rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True)
__mul__ = _binary_arithmetic_operation(operator.mul)
__rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True)
__pow__ = _binary_arithmetic_operation(operator.pow)
__rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True)
__div__ = _binary_arithmetic_operation(operator.truediv)
__rdiv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__truediv__ = _binary_arithmetic_operation(operator.truediv)
__rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__eq__ = _binary_comparison_operation(operator.eq)
__ne__ = _binary_comparison_operation(operator.ne)
__lt__ = _binary_comparison_operation(operator.lt)
__gt__ = _binary_comparison_operation(operator.gt)
__le__ = _binary_comparison_operation(operator.le)
__ge__ = _binary_comparison_operation(operator.ge)
__neg__ = _unary_arithmetic_operation(operator.neg)
__abs__ = _unary_arithmetic_operation(operator.abs)
def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = '{0} {1!s}'.format(out, param.unit)
return out
|
1628b53a239aa5b795583902d2f2a4a3829cb50e0fc7b4ca32e50edd58b7e705 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
import abc
import copy
import copyreg
import inspect
import functools
import operator
import types
import warnings
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
from itertools import chain, islice
from functools import partial
import numpy as np
from ..utils import indent, metadata
from ..table import Table
from ..units import Quantity, UnitsError, dimensionless_unscaled
from ..units.utils import quantity_asanyarray
from ..utils import (sharedmethod, find_current_module,
InheritDocstrings, OrderedDescriptorContainer,
check_broadcast, IncompatibleShapeError, isiterable)
from ..utils.codegen import make_function_with_signature
from ..utils.exceptions import AstropyDeprecationWarning
from .utils import (combine_labels, make_binary_operator_eval,
ExpressionTree, AliasDict, get_inputs_and_params,
_BoundingBox, _combine_equivalency_dict)
from ..nddata.utils import add_array, extract_array
from .parameters import Parameter, InputParameterError, param_repr_oneline
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
'custom_model', 'ModelDefinitionError']
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions"""
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
Any additional keyword arguments passed in are passed to
`_CompoundModelMeta._from_operator`.
"""
# Note: Originally this used functools.partial, but that won't work when
# used in the class definition of _CompoundModelMeta since
# _CompoundModelMeta has not been defined yet.
# Perform an arithmetic operation on two models.
return lambda left, right: _CompoundModelMeta._from_operator(oper, left,
right, **kwargs)
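# Sketch of the resulting behavior: the class-level assignment
# ``__or__ = _model_oper('|')`` below makes an expression such as
# ``Gaussian1D() | Scale()`` delegate to
# ``_CompoundModelMeta._from_operator('|', left, right)``, which builds the
# corresponding compound model.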
class _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
_parameters_ = OrderedDict()
def __new__(mcls, name, bases, members):
# See the docstring for _is_dynamic above
if '_is_dynamic' not in members:
members['_is_dynamic'] = mcls._is_dynamic
return super().__new__(mcls, name, bases, members)
def __init__(cls, name, bases, members):
# Make sure OrderedDescriptorContainer gets to run before doing
# anything else
super().__init__(name, bases, members)
if cls._parameters_:
if hasattr(cls, '_param_names'):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(cls._parameters_)
else:
cls.param_names = tuple(cls._parameters_)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
cls._handle_special_methods(members)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
else:
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith('_abc_'):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ('__init__', '__call__'):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def n_inputs(cls):
return len(cls.inputs)
@property
def n_outputs(cls):
return len(cls.outputs)
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
def rename(cls, name):
"""
Creates a copy of this model class with a new name.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class '__main__.SkyRotation'>
Name: SkyRotation (Rotation2D)
Inputs: ('x', 'y')
Outputs: ('x', 'y')
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
new_cls = type(name, (cls,), {})
new_cls.__module__ = modname
if hasattr(cls, '__qualname__'):
if new_cls.__module__ == '__main__':
# __main__ is not added to a class's qualified name
new_cls.__qualname__ = name
else:
new_cls.__qualname__ = '{0}.{1}'.format(modname, name)
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get('inverse')
if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the code below from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get('bounding_box')
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = _BoundingBox.validate(cls, bounding_box)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
# TODO: Maybe warn in the above case?
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = \
cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of _BoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
'The bounding_box method for {0} is not correctly '
'defined: If defined as a method all arguments to that '
'method (besides self) must be keyword arguments with '
'default values that can be used to compute a default '
'bounding box.'.format(cls.name))
kwargs.append((param.name, param.default))
__call__ = make_function_with_signature(__call__, ('self',), kwargs)
return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),
{'__call__': __call__})
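    # Sketch of the pattern this supports (hypothetical model): a class body
    # containing
    #
    #     def bounding_box(self, factor=5):
    #         return (self.mean - factor * self.stddev,
    #                 self.mean + factor * self.stddev)
    #
    # yields a bounding box that can be recomputed with other arguments via
    # ``model.bounding_box(factor=2)``.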
def _handle_special_methods(cls, members):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, '__qualname__'):
wrapper.__qualname__ = '{0}.{1}'.format(
cls.__qualname__, wrapper.__name__)
if ('__call__' not in members and 'inputs' in members and
isinstance(members['inputs'], tuple)):
            # Don't create a custom __call__ for classes that already have one
            # explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
            # When called, models can take a few optional keyword arguments:
            #
            # * model_set_axis, which indicates (for multi-dimensional input)
            #   which axis is used to indicate different models
            #
            # * with_bounding_box and fill_value, which control whether the
            #   model's bounding_box is applied and what value to use for
            #   inputs that fall outside of it
            #
            # * equivalencies, a dictionary of equivalencies to be applied to
            #   the input values, where each key should correspond to one of
            #   the inputs.
            #
            # The following code creates the __call__ function with these
            # keyword arguments.
inputs = members['inputs']
args = ('self',) + inputs
new_call = make_function_with_signature(
__call__, args, [('model_set_axis', None),
('with_bounding_box', False),
('fill_value', np.nan),
('equivalencies', None)])
# The following makes it look like __call__ was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if ('__init__' not in members and not inspect.isabstract(cls) and
cls._parameters_):
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional arguments
if all(p.default is not None for p in cls._parameters_.values()):
args = ('self',)
kwargs = []
for param_name in cls.param_names:
default = cls._parameters_[param_name].default
unit = cls._parameters_[param_name].unit
# If the unit was specified in the parameter but the default
# is not a Quantity, attach the unit to the default.
if unit is not None:
default = Quantity(default, unit, copy=False)
kwargs.append((param_name, default))
else:
args = ('self',) + cls.param_names
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs='kwargs')
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif (inspect.isabstract(base) or
base.__name__.startswith('_')):
break
bases.append(base.name)
if bases:
return '{0} ({1})'.format(cls.name, ' -> '.join(bases))
else:
return cls.name
try:
default_keywords = [
('Name', format_inheritance(cls)),
('Inputs', cls.inputs),
('Outputs', cls.outputs),
]
if cls.param_names:
default_keywords.append(('Fittable parameters',
cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append('{0}: {1}'.format(keyword, value))
return '\n'.join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
This class sets the constraints and other properties for all individual
parameters and performs parameter validation.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
        sets. Also, ``model_set_axis=False`` can be used to indicate that a
        given input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
Dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list of length 2
giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
        List of functions of length n such that ``ieqcons[j](x0, *args) >=
        0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
inputs = ()
"""The name(s) of the input variable(s) on which a model is evaluated."""
outputs = ()
"""The name(s) of the output(s) of the model."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
if meta is not None:
self.meta = meta
self._name = name
self._initialize_constraints(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_unit_support()
def _initialize_unit_support(self):
"""
        Convert ``self._input_units_strict`` and
        ``self._input_units_allow_dimensionless`` to dictionaries
        mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {key: self._input_units_strict for
key in self.__class__.inputs}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless
for key in self.__class__.inputs}
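    # E.g. a class-level ``_input_units_strict = True`` on a two-input model
    # with ``inputs = ('x', 'y')`` becomes ``{'x': True, 'y': True}`` here.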
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
return self._input_units_strict
@input_units_strict.setter
def input_units_strict(self, val):
if isinstance(val, bool):
self._input_units_strict = {key: val for key in self.__class__.inputs}
else:
self._input_units_strict = val
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
return self._input_units_allow_dimensionless
@input_units_allow_dimensionless.setter
def input_units_allow_dimensionless(self, val):
if isinstance(val, bool):
self._input_units_allow_dimensionless = {key: val for key in self.__class__.inputs}
else:
self._input_units_allow_dimensionless = val
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
# Check whether any of the inputs are quantities
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
parameters = self._param_sets(raw=True, units=True)
with_bbox = kwargs.pop('with_bounding_box', False)
fill_value = kwargs.pop('fill_value', np.nan)
bbox = None
if with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if self.n_inputs > 1 and bbox is not None:
# bounding_box is in python order - convert it to the order of the inputs
bbox = bbox[::-1]
if bbox is None:
outputs = self.evaluate(*chain(inputs, parameters))
else:
if self.n_inputs == 1:
bbox = [bbox]
# indices where input is outside the bbox
# have a value of 1 in ``nan_ind``
nan_ind = np.zeros(inputs[0].shape, dtype=bool)
for ind, inp in enumerate(inputs):
                    # Pass an ``out`` array so that ``axis_ind`` is an array
                    # for scalars as well.
axis_ind = np.zeros(inp.shape, dtype=bool)
axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)
nan_ind[axis_ind] = 1
# get an array with indices of valid inputs
valid_ind = np.logical_not(nan_ind).nonzero()
                # ``args`` holds only the inputs that fall within the bbox
args = []
for input in inputs:
if not input.shape:
# shape is ()
if nan_ind:
outputs = [fill_value for a in args]
else:
args.append(input)
else:
args.append(input[valid_ind])
valid_result = self.evaluate(*chain(args, parameters))
if self.n_outputs == 1:
valid_result = [valid_result]
# combine the valid results with the ``fill_value`` values
# outside the bbox
result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]
for ind, r in enumerate(valid_result):
if not result[ind].shape:
# shape is ()
result[ind] = r
else:
result[ind][valid_ind] = r
# format output
if self.n_outputs == 1:
outputs = np.asarray(result[0])
else:
outputs = [np.asarray(r) for r in result]
else:
outputs = self.evaluate(*chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(format_info, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
else:
return outputs
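    # Illustrative sketch of the bounding-box handling above: for a model such
    # as ``g = Gaussian1D()`` (whose default bounding_box is roughly
    # (-5.5, 5.5)), ``g(np.array([0., 30.]), with_bounding_box=True)``
    # evaluates the first point normally and returns ``fill_value`` (NaN by
    # default) for the out-of-box second point.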
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def n_inputs(self):
"""
The number of inputs to this model.
Equivalent to ``len(model.inputs)``.
"""
return len(self.inputs)
@property
def n_outputs(self):
"""
The number of outputs from this model.
Equivalent to ``len(model.outputs)``.
"""
return len(self.outputs)
@property
def model_set_axis(self):
"""
        The index of the model set axis--that is, the axis of a parameter
        array that indicates which model in a model set a given parameter
        value pertains to--as specified when the model was initialized.
See the documentation on `Model Sets
<http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
        Return parameters as a pset (one array per parameter).
        This is a list with one item per parameter, where each item is an
        array of that parameter's values across all parameter sets, with the
        last axis associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
"parameters array: {0}".format(e))
@property
def fixed(self):
"""
A `dict` mapping parameter names to their fixed constraint.
"""
return self._constraints['fixed']
@property
def tied(self):
"""
A `dict` mapping parameter names to their tied constraint.
"""
return self._constraints['tied']
@property
def bounds(self):
"""
A `dict` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples.
"""
return self._constraints['bounds']
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._constraints['eqcons']
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._constraints['ineqcons']
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
        set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
return self._inverse()
raise NotImplementedError("An analytical inverse transform has not "
"been implemented for this model.")
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse.")
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
del self._user_inverse
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
`None` for no bounding box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
        manually to an array-like object of shape ``(model.n_inputs, 2)``. For
        further usage, see :ref:`bounding-boxes`.
The limits are ordered according to the `numpy` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "astropy\modeling\core.py", line 980, in bounding_box
"No bounding box is defined for this model (note: the "
NotImplementedError: No bounding box is defined for this model (note:
the bounding box was explicitly disabled for this model; use `del
model.bounding_box` to restore the default bounding box, if one is
defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model).")
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError(
"No bounding box is defined for this model.")
elif isinstance(self._bounding_box, _BoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return self._bounding_box()
else:
# The only other allowed possibility is that it's a _BoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), _model=self)()
return self._bounding_box(bounding_box, _model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif (isinstance(self._bounding_box, type) and
issubclass(self._bounding_box, _BoundingBox)):
cls = self._bounding_box
else:
cls = _BoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def separable(self):
""" A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
else:
raise NotImplementedError(
'The "separable" property is not defined for '
'model {}'.format(self.__class__.__name__))
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have been
converted to the right units for the data, then the units have been
stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not necessarily
the units of the input data, but are derived from them. Model subclasses
that want fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
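        Examples
        --------
        A minimal, hedged sketch (assumes a model such as
        `~astropy.modeling.functional_models.Gaussian1D`, which defines
        ``_parameter_units_for_data_units``)::
            >>> from astropy.modeling.models import Gaussian1D
            >>> from astropy import units as u
            >>> g = Gaussian1D(amplitude=2 * u.Jy, mean=3 * u.m, stddev=1 * u.m)
            >>> g2 = g.without_units_for_data(x=[1, 2] * u.m, y=[3, 4] * u.Jy)  # doctest: +SKIP
            >>> g2.mean.unit is None  # doctest: +SKIP
            True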
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
return model
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units of
the input data, but are derived from them. Model subclasses that want
fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
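        Examples
        --------
        A minimal, hedged sketch (the mirror image of
        `~astropy.modeling.Model.without_units_for_data`)::
            >>> from astropy.modeling.models import Gaussian1D
            >>> from astropy import units as u
            >>> g = Gaussian1D(amplitude=2, mean=3, stddev=1)
            >>> gq = g.with_units_from_data(x=[1, 2] * u.m, y=[3, 4] * u.Jy)  # doctest: +SKIP
            >>> gq.mean.unit  # doctest: +SKIP
            Unit("m")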
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly, hence
# the call to _set_unit.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
        return any(getattr(self, param).unit is not None
                   for param in self.param_names)
@property
def _supports_unit_fitting(self):
# If the model has a '_parameter_units_for_data_units' method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of the
returned array. If this is not provided (or None), the model will be
evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of this
model is not set.
Examples
--------
:ref:`bounding-boxes`
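        A minimal, hedged sketch of rendering into an existing array (the
        exact values are illustrative)::
            >>> import numpy as np
            >>> from astropy.modeling.models import Gaussian2D
            >>> g = Gaussian2D(x_mean=25, y_mean=25, x_stddev=2, y_stddev=2)
            >>> g.bounding_box = ((20, 30), (20, 30))
            >>> image = g.render(out=np.zeros((50, 50)))  # doctest: +SKIP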
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out, dtype=float)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
        not be overridden since it will return the input units based on the
annotations.
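        Examples
        --------
        A hedged sketch of the annotation mechanism (``Decay`` is a
        hypothetical model defined only for illustration)::
            >>> from astropy import units as u
            >>> from astropy.modeling import Fittable1DModel, Parameter
            >>> class Decay(Fittable1DModel):
            ...     rate = Parameter(default=1)
            ...     @staticmethod
            ...     def evaluate(x: u.s, rate):
            ...         return rate * x.value
            >>> Decay().input_units  # doctest: +SKIP
            {'x': Unit("s")}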
"""
if hasattr(self, '_input_units'):
return self._input_units
elif hasattr(self.evaluate, '__annotations__'):
annotations = self.evaluate.__annotations__.copy()
annotations.pop('return', None)
if annotations:
                # If any input lacks an annotation, this will raise a KeyError.
return dict((name, annotations[name]) for name in self.inputs)
else:
# None means any unit is accepted
return None
@input_units.setter
def input_units(self, input_units):
self._input_units = input_units
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the output
of evaluate should be in, and returns a dictionary mapping outputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
        overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, '_return_units'):
return self._return_units
elif hasattr(self.evaluate, '__annotations__'):
return self.evaluate.__annotations__.get('return', None)
else:
# None means any unit is accepted
return None
@return_units.setter
def return_units(self, return_units):
self._return_units = return_units
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
**kwargs):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there are more than one parameter sets. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
n_models = len(self)
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
_validate_input_shapes(inputs, self.inputs, n_models,
model_set_axis, self.standard_broadcasting)
inputs = self._validate_input_units(inputs, equivalencies)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if n_models == 1:
return _prepare_inputs_single_model(self, params, inputs,
**kwargs)
else:
return _prepare_inputs_model_set(self, params, inputs, n_models,
model_set_axis, **kwargs)
def _validate_input_units(self, inputs, equivalencies=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(self.inputs,
equivalencies,
self.input_units_equivalencies)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is because
# some equivalencies are non-linear, and we need to be
# sure that we evaluate the model in its own frame
# of reference. If input_units_strict is set, we also
# need to convert to the input units.
if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]:
inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required dimensionless "
"input".format(name,
self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type))
else:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required input units of "
"{4} ({5})".format(name, self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type,
input_unit,
input_unit.physical_type))
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (not self.input_units_allow_dimensionless[input_name] and
input_unit is not dimensionless_unscaled and input_unit is not None):
if np.any(inputs[i] != 0):
raise UnitsError("{0}: Units of input '{1}', (dimensionless), could not be "
"converted to required input units of "
"{2} ({3})".format(name, self.inputs[i], input_unit,
input_unit.physical_type))
return inputs
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple([Quantity(out, return_units[out_name], subok=True)
for out, out_name in zip(outputs, self.outputs)])
return outputs
def prepare_outputs(self, format_info, *outputs, **kwargs):
model_set_axis = kwargs.get('model_set_axis', None)
if len(self) == 1:
return _prepare_outputs_single_model(self, outputs, format_info)
else:
return _prepare_outputs_model_set(self, outputs, format_info, model_set_axis)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return copy.deepcopy(self)
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
@sharedmethod
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
# *** Internal methods ***
@sharedmethod
def _from_existing(self, existing, param_names):
"""
Creates a new instance of ``cls`` that shares its underlying parameter
values with an existing model instance given by ``existing``.
This is used primarily by compound models to return a view of an
individual component of a compound model. ``param_names`` should be
the names of the parameters in the *existing* model to use as the
parameters in this new model. Its length should equal the number of
parameters this model takes, so that it can map parameters on the
existing model to parameters on this model one-to-one.
"""
# Basically this is an alternative __init__
if isinstance(self, type):
# self is a class, not an instance
needs_initialization = True
dummy_args = (0,) * len(param_names)
self = self.__new__(self, *dummy_args)
else:
needs_initialization = False
self = self.copy()
aliases = dict(zip(self.param_names, param_names))
# This is basically an alternative _initialize_constraints
constraints = {}
for cons_type in self.parameter_constraints:
orig = existing._constraints[cons_type]
constraints[cons_type] = AliasDict(orig, aliases)
self._constraints = constraints
self._n_models = existing._n_models
self._model_set_axis = existing._model_set_axis
self._parameters = existing._parameters
self._param_metrics = defaultdict(dict)
for param_a, param_b in aliases.items():
            # Take the param metrics info for the given parameters in the
# existing model, and hand them to the appropriate parameters in
# the new model
self._param_metrics[param_a] = existing._param_metrics[param_b]
if needs_initialization:
self.__init__(*dummy_args)
return self
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
if hasattr(self, '_constraints'):
# Skip constraint initialization if it has already been handled via
# an alternate initialization
return
self._constraints = {}
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
self._constraints[constraint] = values.copy()
# Update with default parameter constraints
for param_name in self.param_names:
param = getattr(self, param_name)
# Parameters don't have all constraint types
value = getattr(param, constraint)
if value is not None:
self._constraints[constraint][param_name] = value
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._constraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
if hasattr(self, '_parameters'):
# Skip parameter initialization if it has already been handled via
# an alternate initialization
return
n_models = kwargs.pop('n_models', None)
if not (n_models is None or
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
"(got {0!r})".format(n_models))
model_set_axis = kwargs.pop('model_set_axis', None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (model_set_axis is False or
(isinstance(model_set_axis, int) and
not isinstance(model_set_axis, bool))):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
"model in a set of models (got {0!r}).".format(
model_set_axis))
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = {}
if len(args) > len(self.param_names):
raise TypeError(
"{0}.__init__() takes at most {1} positional arguments ({2} "
"given)".format(self.__class__.__name__, len(self.param_names),
len(args)))
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
                # A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=float)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
"{0}.__init__() got multiple values for parameter "
"{1!r}".format(self.__class__.__name__, param_name))
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[param_name] = quantity_asanyarray(value, dtype=float)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
'{0}.__init__() got an unrecognized parameter '
'{1!r}'.format(self.__class__.__name__, kwarg))
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name, value in params.items():
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension "
"at least {0} for model_set_axis={1} (the value "
"given for {2!r} is only {3}-dimensional)".format(
min_ndim, model_set_axis, name, param_ndim))
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
"Inconsistent dimensions for parameter {0!r} for "
"{1} model sets. The length of axis {2} must be the "
"same for all input parameter values".format(
name, n_models, model_set_axis))
self._check_param_broadcast(params, max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(params, None)
self._n_models = n_models
self._initialize_parameter_values(params)
def _initialize_parameter_values(self, params):
# self._param_metrics should have been initialized in
# self._initialize_parameters
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
unit = None
param_descr = getattr(self, name)
if params.get(name) is None:
default = param_descr.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
"{0}.__init__() requires a value for parameter "
"{1!r}".format(self.__class__.__name__, name))
value = params[name] = default
unit = param_descr.unit
else:
value = params[name]
if isinstance(value, Quantity):
unit = value.unit
else:
unit = None
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
if unit is None and param_descr.unit is not None:
raise InputParameterError(
"{0}.__init__() requires a Quantity for parameter "
"{1!r}".format(self.__class__.__name__, name))
param_metrics[name]['orig_unit'] = unit
param_metrics[name]['raw_unit'] = None
if param_descr._setter is not None:
_val = param_descr._setter(value)
if isinstance(_val, Quantity):
param_metrics[name]['raw_unit'] = _val.unit
else:
param_metrics[name]['raw_unit'] = None
total_size += param_size
self._param_metrics = param_metrics
self._parameters = np.empty(total_size, dtype=np.float64)
# Now set the parameter values (this will also fill
# self._parameters)
# TODO: This is a bit ugly, but easier to deal with than how this was
# done previously. There's still lots of opportunity for refactoring
# though, in particular once we move the _get/set_model_value methods
# out of Parameter and into Model (renaming them
# _get/set_parameter_value)
for name, value in params.items():
# value here may be a Quantity object.
param_descr = getattr(self, name)
unit = param_descr.unit
value = np.array(value)
orig_unit = param_metrics[name]['orig_unit']
if param_descr._setter is not None:
if unit is not None:
value = np.asarray(param_descr._setter(value * orig_unit).value)
else:
value = param_descr._setter(value)
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
for name in params:
param_descr = getattr(self, name)
param_descr.validator(param_descr.value)
def _check_param_broadcast(self, params, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
param_names = []
model_set_axis = self._model_set_axis
for name in self.param_names:
# Previously this just used iteritems(params), but we loop over all
# param_names instead just to ensure some determinism in the
# ordering behavior
if name not in params:
continue
value = params[name]
param_names.append(name)
# We've already checked that each parameter array is compatible in
# the model_set_axis dimension, but now we need to check the
# dimensions excluding that axis
# Split the array dimensions into the axes before model_set_axis
# and after model_set_axis
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
                # array with shape (2,) is extended to (2, 1, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (param_shape[:model_set_axis + 1] +
new_axes +
param_shape[model_set_axis + 1:])
self._param_metrics[name]['broadcast_shape'] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = param_names[shape_a_idx]
param_b = param_names[shape_b_idx]
raise InputParameterError(
"Parameter {0!r} of shape {1!r} cannot be broadcast with "
"parameter {2!r} of shape {3!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules.".format(param_a, shape_a,
param_b, shape_b))
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
        are actually stored in the ._parameters array), as opposed to the values
        displayed to users. In most cases these are one and the same, but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
param_metrics = self._param_metrics
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw:
value = param._raw_value
else:
value = param.value
broadcast_shape = param_metrics[name].get('broadcast_shape')
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and self._param_metrics[name]['raw_unit'] is not None:
unit = self._param_metrics[name]['raw_unit']
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
# TODO: Returning an array from this method may be entirely pointless
# for internal use--perhaps only the external param_sets method should
# return an array (and just for backwards compat--I would prefer to
# maybe deprecate that method)
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
        # TODO: I think this could be reworked to present model sets better
parts = [repr(a) for a in args]
parts.extend(
"{0}={1}".format(name,
param_repr_oneline(getattr(self, name)))
for name in self.param_names)
if self.name is not None:
parts.append('name={0!r}'.format(self.name))
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] != value:
continue
parts.append('{0}={1!r}'.format(kwarg, value))
if len(self) > 1:
parts.append("n_models={0}".format(len(self)))
return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))
def _format_str(self, keywords=[]):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords + keywords
if value is not None]
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
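    A minimal, hedged sketch of a subclass (``Line`` is hypothetical)::
        >>> from astropy.modeling import Fittable1DModel, Parameter
        >>> class Line(Fittable1DModel):
        ...     slope = Parameter(default=1)
        ...     intercept = Parameter(default=0)
        ...     @staticmethod
        ...     def evaluate(x, slope, intercept):
        ...         return slope * x + intercept
        >>> Line()(2.0)  # doctest: +SKIP
        2.0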
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x', 'y')
outputs = ('z',)
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
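# A hedged sketch of the tuple convention used above: if f = (f_eval, 1, 1)
# and g = (g_eval, 2, 1), then _join_operator(f, g) returns a 3-input,
# 2-output evaluator that feeds inputs[:1] to f_eval and inputs[1:] to
# g_eval.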
# TODO: Support a couple unary operators--at least negation?
BINARY_OPERATORS = {
'+': _make_arithmetic_operator(operator.add),
'-': _make_arithmetic_operator(operator.sub),
'*': _make_arithmetic_operator(operator.mul),
'/': _make_arithmetic_operator(operator.truediv),
'**': _make_arithmetic_operator(operator.pow),
'|': _composition_operator,
'&': _join_operator
}
_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
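# A hedged note on the table above: a higher index means tighter binding, and
# the table is consulted when *formatting* a compound model expression (see
# _format_expression below) to decide where parentheses are required.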
class _CompoundModelMeta(_ModelMeta):
_tree = None
_submodels = None
_submodel_names = None
_nextid = 0
_param_names = None
# _param_map is a mapping of the compound model's generated param names to
# the parameters of submodels they are associated with. The values in this
    # mapping are (idx, name) tuples where idx is the index of the submodel this
# parameter is associated with, and name is the same parameter's name on
# the submodel
# In principle this will allow compound models to give entirely new names
# to parameters that don't have to be the same as their original names on
# the submodels, but right now that isn't taken advantage of
_param_map = None
_slice_offset = 0
# When taking slices of a compound model, this keeps track of how offset
# the first model in the slice is from the first model in the original
# compound model it was taken from
# This just inverts _param_map, swapping keys with values. This is also
# useful to have.
_param_map_inverse = None
_fittable = None
_evaluate = None
def __getitem__(cls, index):
index = cls._normalize_index(index)
if isinstance(index, (int, np.integer)):
return cls._get_submodels()[index]
else:
return cls._get_slice(index.start, index.stop)
def __getattr__(cls, attr):
# Make sure the _tree attribute is set; otherwise we are not looking up
# an attribute on a concrete compound model class and should just raise
# the AttributeError
if cls._tree is not None and attr in cls.param_names:
cls._init_param_descriptors()
return getattr(cls, attr)
raise AttributeError(attr)
def __repr__(cls):
if cls._tree is None:
# This case is mostly for debugging purposes
return cls._format_cls_repr()
expression = cls._format_expression()
components = cls._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return cls._format_cls_repr(keywords=keywords)
def __dir__(cls):
"""
Returns a list of attributes defined on a compound model, including
all of its parameters.
"""
basedir = super().__dir__()
if cls._tree is not None:
for name in cls.param_names:
basedir.append(name)
basedir.sort()
return basedir
def __reduce__(cls):
rv = super().__reduce__()
if isinstance(rv, tuple):
# Delete _evaluate from the members dict
with suppress(KeyError):
del rv[1][2]['_evaluate']
return rv
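    # A hedged sketch of the de-duplication implemented below: if two
    # submodels are both named 'g', submodel_names yields ('g_0', 'g_1');
    # unnamed submodels (name is None) are disambiguated the same way, as
    # ('None_0', 'None_1').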
@property
def submodel_names(cls):
if cls._submodel_names is None:
seen = {}
names = []
for idx, submodel in enumerate(cls._get_submodels()):
name = str(submodel.name)
if name in seen:
names.append('{0}_{1}'.format(name, idx))
if seen[name] >= 0:
jdx = seen[name]
names[jdx] = '{0}_{1}'.format(names[jdx], jdx)
seen[name] = -1
else:
names.append(name)
seen[name] = idx
cls._submodel_names = tuple(names)
return cls._submodel_names
@property
def param_names(cls):
if cls._param_names is None:
cls._init_param_names()
return cls._param_names
@property
def fittable(cls):
if cls._fittable is None:
cls._fittable = all(m.fittable for m in cls._get_submodels())
return cls._fittable
# TODO: Maybe we could use make_function_with_signature for evaluate, but
# it's probably not worth it (and I'm not sure what the limit is on number
# of function arguments/local variables but we could break that limit for
    # complicated compound models...)
def evaluate(cls, *args):
if cls._evaluate is None:
func = cls._tree.evaluate(BINARY_OPERATORS,
getter=cls._model_evaluate_getter)[0]
cls._evaluate = func
inputs = args[:cls.n_inputs]
params = iter(args[cls.n_inputs:])
result = cls._evaluate(inputs, params)
if cls.n_outputs == 1:
return result[0]
else:
return result
# TODO: This supports creating a new compound model from two existing
# compound models (or normal models) and a single operator. However, it
# ought also to be possible to create a new model from an *entire*
# expression, represented as a sequence of operators and their operands (or
    # an existing ExpressionTree) and build that into a compound model without
# creating an intermediate _CompoundModel class for every single operator
# in the expression. This will prove to be a useful optimization in many
# cases
@classmethod
def _from_operator(mcls, operator, left, right, additional_members={}):
"""
Given a Python operator (represented by a string, such as ``'+'``
or ``'*'``, and two model classes or instances, return a new compound
model that evaluates the given operator on the outputs of the left and
right input models.
If either of the input models are a model *class* (i.e. a subclass of
`~astropy.modeling.Model`) then the returned model is a new subclass of
`~astropy.modeling.Model` that may be instantiated with any parameter
values. If both input models are *instances* of a model, a new class
is still created, but this method returns an *instance* of that class,
taking the parameter values from the parameters of the input model
instances.
If given, the ``additional_members`` `dict` may provide additional
class members that should be added to the generated
`~astropy.modeling.Model` subclass. Some members that are generated by
this method should not be provided by ``additional_members``. These
include ``_tree``, ``inputs``, ``outputs``, ``linear``,
        ``standard_broadcasting``, and ``__module__``. This is currently for
internal use only.
"""
# Note, currently this only supports binary operators, but could be
# easily extended to support unary operators (namely '-') if/when
# needed
children = []
for child in (left, right):
if isinstance(child, (_CompoundModelMeta, _CompoundModel)):
"""
Although the original child models were copied we make another
copy here to ensure that changes in this child compound model
                parameters will not propagate to the result, that is
cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()
cm2 = cm1 | Scale()
cm1.amplitude_0 = 100
assert(cm2.amplitude_0 == 1)
"""
children.append(copy.deepcopy(child._tree))
elif isinstance(child, Model):
children.append(ExpressionTree(child.copy(),
inputs=child.inputs,
outputs=child.outputs))
else:
children.append(ExpressionTree(child, inputs=child.inputs, outputs=child.outputs))
inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)
tree = ExpressionTree(operator, left=children[0], right=children[1],
inputs=inputs, outputs=outputs)
name = str('CompoundModel{0}'.format(_CompoundModelMeta._nextid))
_CompoundModelMeta._nextid += 1
mod = find_current_module(3)
if mod:
modname = mod.__name__
else:
modname = '__main__'
if operator in ('|', '+', '-'):
linear = left.linear and right.linear
else:
# Which is not to say it is *definitely* not linear but it would be
# trickier to determine
linear = False
standard_broadcasting = left.standard_broadcasting and right.standard_broadcasting
# Note: If any other members are added here, make sure to mention them
# in the docstring of this method.
members = additional_members
members.update({
'_tree': tree,
'_is_dynamic': True, # See docs for _ModelMeta._is_dynamic
'inputs': inputs,
'outputs': outputs,
'linear': linear,
'standard_broadcasting': standard_broadcasting,
'__module__': str(modname)})
new_cls = mcls(name, (_CompoundModel,), members)
if isinstance(left, Model) and isinstance(right, Model):
# Both models used in the operator were already instantiated models,
# not model *classes*. As such it's not particularly useful to return
# the class itself, but to instead produce a new instance:
instance = new_cls()
# Workaround for https://github.com/astropy/astropy/issues/3542
# TODO: Any effort to restructure the tree-like data structure for
# compound models should try to obviate this workaround--if
# intermediate compound models are stored in the tree as well then
# we can immediately check for custom inverses on sub-models when
# computing the inverse
instance._user_inverse = mcls._make_user_inverse(
operator, left, right)
if left._n_models == right._n_models:
instance._n_models = left._n_models
else:
raise ValueError('Model sets must have the same number of '
'components.')
return instance
# Otherwise return the new uninstantiated class itself
return new_cls
@classmethod
def _check_inputs_and_outputs(mcls, operator, left, right):
# TODO: These aren't the full rules for handling inputs and outputs, but
# this will handle most basic cases correctly
if operator == '|':
inputs = left.inputs
outputs = right.outputs
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |: {0} (n_inputs={1}, "
"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); "
"n_outputs for the left-hand model must match n_inputs "
"for the right-hand model.".format(
left.name, left.n_inputs, left.n_outputs, right.name,
right.n_inputs, right.n_outputs))
elif operator == '&':
inputs = combine_labels(left.inputs, right.inputs)
outputs = combine_labels(left.outputs, right.outputs)
else:
# Without loss of generality
inputs = left.inputs
outputs = left.outputs
if (left.n_inputs != right.n_inputs or
left.n_outputs != right.n_outputs):
raise ModelDefinitionError(
"Unsupported operands for {0}: {1} (n_inputs={2}, "
"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator".format(
operator, left.name, left.n_inputs, left.n_outputs,
right.name, right.n_inputs, right.n_outputs))
return inputs, outputs
@classmethod
def _make_user_inverse(mcls, operator, left, right):
"""
Generates an inverse `Model` for this `_CompoundModel` when either
model in the operation has a *custom inverse* that was manually
assigned by the user.
If either model has a custom inverse, and in particular if another
`_CompoundModel` has a custom inverse, then none of that model's
sub-models should be considered at all when computing the inverse.
So in that case we just compute the inverse ahead of time and set
it as the new compound model's custom inverse.
Note, this use case only applies when combining model instances,
since model classes don't currently have a notion of a "custom
inverse" (though it could probably be supported by overriding the
class's inverse property).
TODO: Consider fixing things so the aforementioned class-based case
works as well. However, for the present purposes this is good enough.
"""
if not (operator in ('&', '|') and
(left._user_inverse or right._user_inverse)):
# These are the only operators that support an inverse right now
return None
try:
left_inv = left.inverse
right_inv = right.inverse
except NotImplementedError:
# If either inverse is undefined then just return False; this
# means the normal _CompoundModel.inverse routine will fail
# naturally anyways, since it requires all sub-models to have
# an inverse defined
return None
if operator == '&':
return left_inv & right_inv
else:
return right_inv | left_inv
# TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of
# leaf nodes is something the ExpressionTree class itself could just know
def _get_submodels(cls):
# Would make this a lazyproperty but those don't currently work with
# type objects
if cls._submodels is not None:
return cls._submodels
submodels = [c.value for c in cls._tree.traverse_postorder()
if c.isleaf]
cls._submodels = submodels
return submodels
def _init_param_descriptors(cls):
"""
This routine sets up the names for all the parameters on a compound
model, including figuring out unique names for those parameters and
also mapping them back to their associated parameters of the underlying
submodels.
Setting this all up is costly, and only necessary for compound models
that a user will directly interact with. For example when building an
expression like::
>>> M = (Model1 + Model2) * Model3 # doctest: +SKIP
the user will generally never interact directly with the temporary
result of the subexpression ``(Model1 + Model2)``. So there's no need
to setup all the parameters for that temporary throwaway. Only once
the full expression is built and the user initializes or introspects
``M`` is it necessary to determine its full parameterization.
"""
# Accessing cls.param_names will implicitly call _init_param_names if
# needed and thus also set up the _param_map; I'm not crazy about that
# design but it stands for now
for param_name in cls.param_names:
submodel_idx, submodel_param = cls._param_map[param_name]
submodel = cls[submodel_idx]
orig_param = getattr(submodel, submodel_param, None)
if isinstance(submodel, Model):
# Take the parameter's default from the model's value for that
# parameter
default = orig_param.value
else:
default = orig_param.default
# Copy constraints
constraints = dict((key, getattr(orig_param, key))
for key in Model.parameter_constraints)
# Note: Parameter.copy() returns a new unbound Parameter, never
# a bound Parameter even if submodel is a Model instance (as
# opposed to a Model subclass)
new_param = orig_param.copy(name=param_name, default=default,
unit=orig_param.unit,
**constraints)
setattr(cls, param_name, new_param)
def _init_param_names(cls):
"""
This subroutine is solely for setting up the ``param_names`` attribute
itself.
See ``_init_param_descriptors`` for the full parameter setup.
"""
# Currently this skips over Model *instances* in the expression tree;
# basically these are treated as constants and do not add
# fittable/tunable parameters to the compound model.
# TODO: I'm not 100% happy with this design, and maybe we need some
# interface for distinguishing fittable/settable parameters with
# *constant* parameters (which would be distinct from parameters with
# fixed constraints since they're permanently locked in place). But I'm
# not sure if this is really the best way to treat the issue.
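        # A hedged example of the naming scheme implemented below:
        #     Gaussian1D() + Gaussian1D()
        # produces param_names of the form
        #     ('amplitude_0', 'mean_0', 'stddev_0',
        #      'amplitude_1', 'mean_1', 'stddev_1')
        # with _param_map entries such as 'mean_1' -> (1, 'mean').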
names = []
param_map = {}
# Start counting the suffix indices to put on parameter names from the
# slice_offset. Usually this will just be zero, but for compound
# models that were sliced from another compound model this may be > 0
param_suffix = cls._slice_offset
for idx, model in enumerate(cls._get_submodels()):
if not model.param_names:
# Skip models that don't have parameters in the numbering
# TODO: Reevaluate this if it turns out to be confusing, though
# parameter-less models are not very common in practice (there
# are a few projections that don't take parameters)
continue
for param_name in model.param_names:
# This is sort of heuristic, but we want to check that
# model.param_name *actually* returns a Parameter descriptor,
# and that the model isn't some inconsistent type that happens
# to have a param_names attribute but does not actually
# implement settable parameters.
# In the future we can probably remove this check, but this is
# here specifically to support the legacy compat
# _CompositeModel which can be considered a pathological case
# in the context of the new framework
# if not isinstance(getattr(model, param_name, None),
# Parameter):
# break
name = '{0}_{1}'.format(param_name, param_suffix + idx)
names.append(name)
param_map[name] = (idx, param_name)
cls._param_names = tuple(names)
cls._param_map = param_map
cls._param_map_inverse = dict((v, k) for k, v in param_map.items())
def _format_expression(cls):
# TODO: At some point might be useful to make a public version of this,
# albeit with more formatting options
return cls._tree.format_expression(OPERATOR_PRECEDENCE)
def _format_components(cls):
return '\n\n'.join('[{0}]: {1!r}'.format(idx, m)
for idx, m in enumerate(cls._get_submodels()))
def _normalize_index(cls, index):
"""
Converts an index given to __getitem__ to either an integer, or
a slice with integer start and stop values.
If the length of the slice is exactly 1 this converts the index to a
simple integer lookup.
Negative integers are converted to positive integers.
"""
def get_index_from_name(name):
try:
return cls.submodel_names.index(name)
except ValueError:
raise IndexError(
'Compound model {0} does not have a component named '
'{1}'.format(cls.name, name))
def check_for_negative_index(index):
if index < 0:
new_index = len(cls.submodel_names) + index
if new_index < 0:
# If still < 0 then this is an invalid index
raise IndexError(
"Model index {0} out of range.".format(index))
else:
index = new_index
return index
if isinstance(index, str):
return get_index_from_name(index)
elif isinstance(index, slice):
if index.step not in (1, None):
# In principle it could be but I can scarcely imagine a case
# where it would be useful. If someone can think of one then
# we can enable it.
raise ValueError(
"Step not supported for compound model slicing.")
start = index.start if index.start is not None else 0
stop = (index.stop
if index.stop is not None else len(cls.submodel_names))
if isinstance(start, (int, np.integer)):
start = check_for_negative_index(start)
if isinstance(stop, (int, np.integer)):
stop = check_for_negative_index(stop)
if isinstance(start, str):
start = get_index_from_name(start)
if isinstance(stop, str):
stop = get_index_from_name(stop) + 1
length = stop - start
if length == 1:
return start
elif length <= 0:
raise ValueError("Empty slice of a compound model.")
return slice(start, stop)
elif isinstance(index, (int, np.integer)):
if index >= len(cls.submodel_names):
raise IndexError(
"Model index {0} out of range.".format(index))
return check_for_negative_index(index)
raise TypeError(
'Submodels can be indexed either by their integer order or '
'their name (got {0!r}).'.format(index))
def _get_slice(cls, start, stop):
"""
Return a new model build from a sub-expression of the expression
represented by this model.
Right now this is highly inefficient, as it creates a new temporary
model for each operator that appears in the sub-expression. It would
be better if this just built a new expression tree, and the new model
instantiated directly from that tree.
Once tree -> model instantiation is possible this should be fixed to
use that instead.
"""
members = {'_slice_offset': cls._slice_offset + start}
operators = dict((oper, _model_oper(oper, additional_members=members))
for oper in BINARY_OPERATORS)
return cls._tree.evaluate(operators, start=start, stop=stop)
@staticmethod
def _model_evaluate_getter(idx, model):
n_params = len(model.param_names)
n_inputs = model.n_inputs
n_outputs = model.n_outputs
# If model is not an instance, we need to instantiate it to make sure
# that we can call _validate_input_units (since e.g. input_units can
# be an instance property).
def evaluate_wrapper(model, inputs, param_values):
inputs = model._validate_input_units(inputs)
outputs = model.evaluate(*inputs, *param_values)
if n_outputs == 1:
outputs = (outputs,)
return model._process_output_units(inputs, outputs)
if isinstance(model, Model):
def f(inputs, params):
param_values = tuple(islice(params, n_params))
return evaluate_wrapper(model, inputs, param_values)
else:
# Where previously model was a class, now make an instance
def f(inputs, params):
param_values = tuple(islice(params, n_params))
m = model(*param_values)
return evaluate_wrapper(m, inputs, param_values)
return (f, n_inputs, n_outputs)
class _CompoundModel(Model, metaclass=_CompoundModelMeta):
fit_deriv = None
col_fit_deriv = False
_submodels = None
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return super()._format_str(keywords=keywords)
def _generate_input_output_units_dict(self, mapping, attr):
"""
This method is used to transform dict or bool settings from
submodels into a single dictionary for the composite model,
taking into account renaming of input parameters.
"""
d = {}
for inp, (model, orig_inp) in mapping.items():
mattr = getattr(model, attr)
if isinstance(mattr, dict):
if orig_inp in mattr:
d[inp] = mattr[orig_inp]
elif isinstance(mattr, bool):
d[inp] = mattr
if d: # Note that if d is empty, we just return None
return d
@property
def _supports_unit_fitting(self):
return False
@property
def input_units_allow_dimensionless(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_allow_dimensionless')
@property
def input_units_strict(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_strict')
@property
def input_units(self):
return self._generate_input_output_units_dict(self._tree.inputs_map, 'input_units')
@property
def input_units_equivalencies(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_equivalencies')
@property
def return_units(self):
return self._generate_input_output_units_dict(self._tree.outputs_map,
'return_units')
def __getattr__(self, attr):
# This __getattr__ is necessary, because _CompoundModelMeta creates
# Parameter descriptors *lazily*--they do not exist in the class
# __dict__ until one of them has been accessed.
# However, this is at odds with how Python looks up descriptors (see
# (https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)
# which is to look directly in the class __dict__
# This workaround allows descriptors to work correctly when they are
# not initially found in the class __dict__
value = getattr(self.__class__, attr)
if hasattr(value, '__get__'):
# Object is a descriptor, so we should really return the result of
# its __get__
value = value.__get__(self, self.__class__)
return value
def __getitem__(self, index):
index = self.__class__._normalize_index(index)
model = self.__class__[index]
if isinstance(index, slice):
param_names = model.param_names
else:
param_map = self.__class__._param_map_inverse
param_names = tuple(param_map[index, name]
for name in model.param_names)
return model._from_existing(self, param_names)
@property
def submodel_names(self):
return self.__class__.submodel_names
@sharedmethod
def n_submodels(self):
return len(self.submodel_names)
@property
def param_names(self):
return self.__class__.param_names
@property
def fittable(self):
return self.__class__.fittable
@sharedmethod
def evaluate(self, *args):
return self.__class__.evaluate(*args)
# TODO: The way this works is highly inefficient--the inverse is created by
# making a new model for each operator in the compound model, which could
# potentially mean creating a large number of temporary throwaway model
# classes. This can definitely be optimized in the future by implementing
# a way to construct a single model class from an existing tree
@property
def inverse(self):
def _not_implemented(oper):
def _raise(x, y):
raise NotImplementedError(
"The inverse is not currently defined for compound "
"models created using the {0} operator.".format(oper))
return _raise
operators = dict((oper, _not_implemented(oper))
for oper in ('+', '-', '*', '/', '**'))
operators['&'] = operator.and_
# Reverse the order of compositions
operators['|'] = lambda x, y: operator.or_(y, x)
def getter(idx, model):
try:
# By indexing on self[] this will return an instance of the
# model, with all the appropriate parameters set, which is
# currently required to return an inverse
return self[idx].inverse
except NotImplementedError:
raise NotImplementedError(
"All models in a composite model must have an inverse "
"defined in order for the composite model to have an "
"inverse. {0!r} does not have an inverse.".format(model))
return self._tree.evaluate(operators, getter=getter)
@sharedmethod
def _get_submodels(self):
return self.__class__._get_submodels()
def _parameter_units_for_data_units(self, input_units, output_units):
units_for_data = {}
for imodel, model in enumerate(self._submodels):
units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)
for param_sub in units_for_data_sub:
param = self._param_map_inverse[(imodel, param_sub)]
units_for_data[param] = units_for_data_sub[param_sub]
return units_for_data
def deepcopy(self):
"""
Return a deep copy of a compound model.
"""
new_model = self.copy()
new_model._submodels = [model.deepcopy() for model in self._submodels]
return new_model
def custom_model(*args, fit_deriv=None, **kwargs):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments, where ``N`` is the number of dimensions of the model (the
number of independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if kwargs:
warnings.warn(
"Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
"{0} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any).".format(__name__))
def _custom_model_wrapper(func, fit_deriv=None):
"""
Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable "
"object")
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other "
"callable object")
model_name = func.__name__
inputs, params = get_inputs_and_params(func)
if (fit_deriv is not None and
len(fit_deriv.__defaults__) != len(params)):
raise ModelDefinitionError("derivative function should accept "
"same number of parameters as func.")
# TODO: Maybe have a clever scheme for default output name?
if inputs:
output_names = (inputs[0].name,)
else:
output_names = ('x',)
params = dict((param.name, Parameter(param.name, default=param.default))
for param in params)
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
members = {
'__module__': str(modname),
'__doc__': func.__doc__,
'inputs': tuple(x.name for x in inputs),
'outputs': output_names,
'evaluate': staticmethod(func),
}
if fit_deriv is not None:
members['fit_deriv'] = staticmethod(fit_deriv)
members.update(params)
return type(model_name, (FittableModel,), members)
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from ``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`bounding-boxes`
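A minimal sketch, evaluating a 2-D Gaussian only inside a manually set
bounding box (the parameter values below are purely illustrative)::
>>> import numpy as np
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=1, y_stddev=1)
>>> model.bounding_box = ((2, 8), (2, 8))
>>> image = render_model(model, arr=np.zeros((10, 10)))
>>> image.shape
(10, 10)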
"""
bbox = model.bounding_box
if (coords is None) & (arr is None) & (bbox is None):
raise ValueError('If no bounding_box is set, coords or arr must be input.')
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError('number of array dimensions inconsistent with '
'number of model inputs.')
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError('coordinate length inconsistent with the number '
'of model inputs.')
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError('coordinate shape inconsistent with the '
'array shape.')
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# ensures the position is at the center pixel, which is important when using add_array
pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError('The `bounding_box` is larger than the input'
' arr in one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def _prepare_inputs_single_model(model, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
# Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end.
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if model.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if model.n_outputs > model.n_inputs:
if len(set(broadcasts)) > 1:
raise ValueError(
"For models with n_outputs > n_inputs, the combination of "
"all inputs and parameters must broadcast to the same shape, "
"which will be used as the shape of all outputs. In this "
"case some of the inputs had different shapes, so it is "
"ambiguous how to format outputs for this model. Try using "
"inputs that are all the same size and shape.")
else:
# Extend the broadcasts list to include shapes for all outputs
extra_outputs = model.n_outputs - model.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty; just
# add a None, since no broadcasting of outputs against inputs is
# necessary (see _prepare_outputs_single_model).
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
def _prepare_outputs_single_model(model, outputs, format_info):
broadcasts = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
broadcast_shape = broadcasts[idx]
if broadcast_shape is not None:
if not broadcast_shape:
# Shape is (), i.e. a scalar should be returned
outputs[idx] = np.asscalar(output)
else:
outputs[idx] = output.reshape(broadcast_shape)
return tuple(outputs)
def _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,
**kwargs):
reshaped = []
pivots = []
for idx, _input in enumerate(inputs):
max_param_shape = ()
if n_models > 1 and model_set_axis is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (_input.shape[:model_set_axis] +
_input.shape[model_set_axis + 1:])
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(input_shape, param.shape)
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(param.shape) > len(max_param_shape):
max_param_shape = param.shape
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model.model_set_axis
else:
pivot = input_ndim - len(max_param_shape)
new_shape = (_input.shape[:pivot] + (1,) +
_input.shape[pivot:])
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = model.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (_input.shape[:pivot + 1] + new_axes +
_input.shape[pivot + 1:])
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis,
pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if model.n_inputs < model.n_outputs:
pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))
return reshaped, (pivots,)
def _prepare_outputs_model_set(model, outputs, format_info, model_set_axis):
pivots = format_info[0]
# If model_set_axis = False was passed then use
# model._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = model.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model_set_axis)
return tuple(outputs)
def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
validate_broadcasting):
"""
Perform basic validation of model inputs--that they are mutually
broadcastable and that they have the minimum dimensions for the given
model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = n_models > 1 and model_set_axis is not False
if not (validate_broadcasting or check_model_set_axis):
# Nothing else needed here
return
all_shapes = []
for idx, _input in enumerate(inputs):
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
"For model_set_axis={0}, all inputs must be at "
"least {1}-dimensional.".format(
model_set_axis, model_set_axis + 1))
elif input_shape[model_set_axis] != n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
"Input argument {0!r} does not have the correct "
"dimensions in model_set_axis={1} for a model set with "
"n_models={2}.".format(argname, model_set_axis,
n_models))
all_shapes.append(input_shape)
if not validate_broadcasting:
return
try:
input_broadcast = check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
arg_a = argnames[shape_a_idx]
arg_b = argnames[shape_b_idx]
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot "
"be broadcast with input {2!r} of shape {3!r}".format(
arg_a, shape_a, arg_b, shape_b))
return input_broadcast
copyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)
copyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)
|
06be30a2d143e4a7c2f2a06bbafa4540044ae6b5fa0b9f08a160e4ef8401d451 | """
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
from .core import FittableModel
__all__ = ['Mapping', 'Identity']
class Mapping(FittableModel):
"""
Allows inputs to be reordered, duplicated or dropped.
Parameters
----------
mapping : tuple
A tuple of integers representing indices of the inputs to this model
to return and in what order to return them. See
:ref:`compound-model-mappings` for more details.
n_inputs : int
Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
used (i.e. the highest input index used in the mapping).
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Raises
------
TypeError
Raised when the number of inputs is less than ``max(mapping)``.
Examples
--------
>>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
>>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
>>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
>>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
>>> model(1, 2) # doctest: +FLOAT_CMP
(17.0, 14.2)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, mapping, n_inputs=None, name=None, meta=None):
if n_inputs is None:
self._inputs = tuple('x' + str(idx)
for idx in range(max(mapping) + 1))
else:
self._inputs = tuple('x' + str(idx)
for idx in range(n_inputs))
self._outputs = tuple('x' + str(idx) for idx in range(len(mapping)))
self._mapping = mapping
self.input_units_strict = {key: False for key in self._inputs}
self.input_units_allow_dimensionless = {key: False for key in self._inputs}
super().__init__(name=name, meta=meta)
@property
def inputs(self):
"""
The name(s) of the input variable(s) on which a model is evaluated.
"""
return self._inputs
@property
def outputs(self):
"""The name(s) of the output(s) of the model."""
return self._outputs
@property
def mapping(self):
"""Integers representing indices of the inputs."""
return self._mapping
def __repr__(self):
if self.name is None:
return '<Mapping({0})>'.format(self.mapping)
else:
return '<Mapping({0}, name={1})>'.format(self.mapping, self.name)
def evaluate(self, *args):
if len(args) != self.n_inputs:
name = self.name if self.name is not None else "Mapping"
raise TypeError('{0} expects {1} inputs; got {2}'.format(
name, self.n_inputs, len(args)))
result = tuple(args[idx] for idx in self._mapping)
if self.n_outputs == 1:
return result[0]
return result
@property
def inverse(self):
"""
A `Mapping` representing the inverse of the current mapping.
Raises
------
`NotImplementedError`
An inverse does not exist for mappings that drop some of their inputs
(there is then no way to reconstruct the inputs that were dropped).
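Examples
--------
A mapping that merely permutes its inputs is invertible; a minimal
sketch::
>>> m = Mapping((2, 0, 1))
>>> m.inverse.mapping
(1, 2, 0)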
"""
try:
mapping = tuple(self.mapping.index(idx)
for idx in range(self.n_inputs))
except ValueError:
raise NotImplementedError(
"Mappings such as {0} that drop one or more of their inputs "
"are not invertible at this time.".format(self.mapping))
inv = self.__class__(mapping)
inv._inputs = self._outputs
inv._outputs = self._inputs
return inv
class Identity(Mapping):
"""
Returns inputs unchanged.
This class is useful in compound models when some of the inputs must be
passed unchanged to the next model.
Parameters
----------
n_inputs : int
Specifies the number of inputs this identity model accepts.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Examples
--------
Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::
>>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
... Identity)
>>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
>>> model(1,1) # doctest: +FLOAT_CMP
(2.4, 2.0)
>>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
(1.0, 1.0)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, n_inputs, name=None, meta=None):
mapping = tuple(range(n_inputs))
super().__init__(mapping, name=name, meta=meta)
def __repr__(self):
if self.name is None:
return '<Identity({0})>'.format(self.n_inputs)
else:
return '<Identity({0}, name={1})>'.format(self.n_inputs, self.name)
@property
def inverse(self):
"""
The inverse transformation.
In this case of `Identity`, ``self.inverse is self``.
"""
return self
|
7cfbe827c32cfc9cb942362b2defe9cfeb024db9ff764cfb593bd617c210acb7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model and functions related to blackbody radiation.
.. _blackbody-planck-law:
Blackbody Radiation
-------------------
Blackbody flux is calculated with Planck law
(:ref:`Rybicki & Lightman 1979 <ref-rybicki1979>`):
.. math::
B_{\\lambda}(T) = \\frac{2 h c^{2} / \\lambda^{5}}{exp(h c / \\lambda k T) - 1}
B_{\\nu}(T) = \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
where the unit of :math:`B_{\\lambda}(T)` is
:math:`erg \\; s^{-1} cm^{-2} \\mathring{A}^{-1} sr^{-1}`, and
:math:`B_{\\nu}(T)` is :math:`erg \\; s^{-1} cm^{-2} Hz^{-1} sr^{-1}`.
:func:`~astropy.modeling.blackbody.blackbody_lambda` and
:func:`~astropy.modeling.blackbody.blackbody_nu` calculate the
blackbody flux for :math:`B_{\\lambda}(T)` and :math:`B_{\\nu}(T)`,
respectively.
For blackbody representation as a model, see :class:`BlackBody1D`.
.. _blackbody-examples:
Examples
^^^^^^^^
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_lambda, blackbody_nu
Calculate blackbody flux for 5000 K at 100 and 10000 Angstrom while suppressing
any Numpy warnings:
>>> wavelengths = [100, 10000] * u.AA
>>> temperature = 5000 * u.K
>>> with np.errstate(all='ignore'):
... flux_lam = blackbody_lambda(wavelengths, temperature)
... flux_nu = blackbody_nu(wavelengths, temperature)
>>> flux_lam # doctest: +FLOAT_CMP
<Quantity [ 1.27452545e-108, 7.10190526e+005] erg / (Angstrom cm2 s sr)>
>>> flux_nu # doctest: +FLOAT_CMP
<Quantity [ 4.25135927e-123, 2.36894060e-005] erg / (cm2 Hz s sr)>
Plot a blackbody spectrum for 5000 K:
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.modeling.blackbody import blackbody_lambda
temperature = 5000 * u.K
wavemax = (const.b_wien / temperature).to(u.AA) # Wien's displacement law
waveset = np.logspace(
0, np.log10(wavemax.value + 10 * wavemax.value), num=1000) * u.AA
with np.errstate(all='ignore'):
flux = blackbody_lambda(waveset, temperature)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(waveset.value, flux.value)
ax.axvline(wavemax.value, ls='--')
ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
ax.set_xlabel(r'$\\lambda$ ({0})'.format(waveset.unit))
ax.set_ylabel(r'$B_{\\lambda}(T)$')
ax.set_title('Blackbody, T = {0}'.format(temperature))
Note that an array of temperatures can also be given instead of a single
temperature. In this case, the Numpy broadcasting rules apply: for instance, if
the frequency and temperature have the same shape, the output will have this
shape too, while if the frequency is a 2-d array with shape ``(n, m)`` and the
temperature is an array with shape ``(m,)``, the output will have a shape
``(n, m)``.
See Also
^^^^^^^^
.. _ref-rybicki1979:
Rybicki, G. B., & Lightman, A. P. 1979, Radiative Processes in Astrophysics (New York, NY: Wiley)
"""
import warnings
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter
from .. import constants as const
from .. import units as u
from ..utils.exceptions import AstropyUserWarning
__all__ = ['BlackBody1D', 'blackbody_nu', 'blackbody_lambda']
# Units
FNU = u.erg / (u.cm**2 * u.s * u.Hz)
FLAM = u.erg / (u.cm**2 * u.s * u.AA)
# Some platform implementations of expm1() are buggy and Numpy uses
# them anyway--the bug is that on certain large inputs it returns
# NaN instead of INF like it should (it should only return NaN on a
# NaN input).
# See https://github.com/astropy/astropy/issues/4171
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
_has_buggy_expm1 = np.isnan(np.expm1(1000)) or np.isnan(np.expm1(1e10))
class BlackBody1D(Fittable1DModel):
"""
One dimensional blackbody model.
Parameters
----------
temperature : :class:`~astropy.units.Quantity`
Blackbody temperature.
bolometric_flux : :class:`~astropy.units.Quantity`
The bolometric flux of the blackbody (i.e., the integral over the
spectral axis).
Notes
-----
Model formula:
.. math:: f(x) = \\pi B_{\\nu} f_{\\text{bolometric}} / (\\sigma T^{4})
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody1D()
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.3585381201978953e-15 erg / (cm2 Hz s)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody1D
from astropy.modeling.blackbody import FLAM
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody1D(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav).to(FLAM, u.spectral_density(wav))
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.lambda_max.to(u.AA).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a bolometric flux. The
# bolometric flux is the integral of the model over the spectral axis. This
# is more useful than simply having an amplitude parameter.
temperature = Parameter(default=5000, min=0, unit=u.K)
bolometric_flux = Parameter(default=1, unit=u.erg / u.cm ** 2 / u.s)
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz.
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {'x': u.spectral()}
def evaluate(self, x, temperature, bolometric_flux):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz.
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
bolometric_flux : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Desired integral for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``bolometric_flux``.
"""
# We need to make sure that we attach units to the temperature if it
# doesn't have any units. We do this because even though blackbody_nu
# can take temperature values without units, the / temperature ** 4
# factor needs units to be defined.
if isinstance(temperature, u.Quantity):
temperature = temperature.to(u.K, equivalencies=u.temperature())
else:
temperature = u.Quantity(temperature, u.K)
# We normalize the returned blackbody so that the integral would be
# unity, and we then multiply by the bolometric flux. A normalized
# blackbody has f_nu = pi * B_nu / (sigma * T^4), which is what we
# calculate here. We convert to 1/Hz to make sure the units are
# simplified as much as possible, then we multiply by the bolometric
# flux to get the normalization right.
fnu = ((np.pi * u.sr * blackbody_nu(x, temperature) /
const.sigma_sb / temperature ** 4).to(1 / u.Hz) *
bolometric_flux)
# If the bolometric_flux parameter has no unit, we should drop the /Hz
# and return a unitless value. This occurs for instance during fitting,
# since we drop the units temporarily.
if hasattr(bolometric_flux, 'unit'):
return fnu
else:
return fnu.value
@property
def input_units(self):
# The input units are those of the 'x' value, which should always be
# Hz. Because we do this, and because input_units_allow_dimensionless
# is set to True, dimensionless values are assumed to be in Hz.
return {'x': u.Hz}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('temperature', u.K),
('bolometric_flux', outputs_unit['y'] * u.Hz)])
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
def blackbody_nu(in_x, temperature):
"""Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Hz.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(temperature, u.K, dtype=np.float64)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError('Temperature should be non-negative: {0}'.format(temp))
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn('Input contains invalid wavelength/frequency value(s)',
AstropyUserWarning)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
if _has_buggy_expm1:
# Replace incorrect nan results with infs--any result of 'nan' is
# incorrect unless the input (in log_boltz) happened to be nan to begin
# with. (As noted in #4393 ideally this would be replaced by a version
# of expm1 that doesn't have this bug, rather than fixing incorrect
# results after the fact...)
boltzm1_nans = np.isnan(boltzm1)
if np.any(boltzm1_nans):
if boltzm1.isscalar and not np.isnan(log_boltz):
boltzm1 = np.inf
else:
boltzm1[np.where(~np.isnan(log_boltz) & boltzm1_nans)] = np.inf
# Calculate blackbody flux
bb_nu = (2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1))
flux = bb_nu.to(FNU, u.spectral_density(freq))
return flux / u.sr # Add per steradian to output flux unit
def blackbody_lambda(in_x, temperature):
"""Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Angstrom.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`.
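Examples
--------
A quick sketch checking the returned unit (the numerical value depends
on physical constants, so only the unit is asserted here)::
>>> from astropy import units as u
>>> blackbody_lambda(5000 * u.AA, 5000 * u.K).unit
Unit("erg / (Angstrom cm2 s sr)")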
"""
if getattr(in_x, 'unit', None) is None:
in_x = u.Quantity(in_x, u.AA)
bb_nu = blackbody_nu(in_x, temperature) * u.sr # Remove sr for conversion
flux = bb_nu.to(FLAM, u.spectral_density(in_x))
return flux / u.sr # Add per steradian to output flux unit
|
93b4586df2452d12031cbfb5d4aecbe2e37e6d5e0c7ecbe522ecc08d247596bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Implements rotations, including spherical rotations as defined in WCS Paper II
[1]_
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
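Examples
--------
A round trip through a native-to-celestial rotation and its inverse (the
parameter values below are purely illustrative)::
>>> import numpy as np
>>> n2c = RotateNative2Celestial(5.63, -72.05, 180)
>>> alpha, delta = n2c(1, 2)
>>> np.allclose(n2c.inverse(alpha, delta), [1, 2])
True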
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import math
import numpy as np
from .core import Model
from .parameters import Parameter
from ..coordinates.matrix_utilities import rotation_matrix, matrix_product
from .. import units as u
from ..utils.decorators import deprecated
from .utils import _to_radian, _to_orig_unit
__all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D',
'EulerAngleRotation']
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def _create_matrix(self, phi, theta, psi, axes_order):
matrices = []
for angle, axis in zip([phi, theta, psi], axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = np.asscalar(angle)
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
result = matrix_product(*matrices[::-1])
return result
@staticmethod
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
@staticmethod
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
@deprecated(2.0)
@staticmethod
def rotation_matrix_from_angle(angle):
"""
Clockwise rotation matrix.
Parameters
----------
angle : float
Rotation angle in radians.
"""
return np.array([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray) and alpha.ndim == 2:
# Capture the original 2-D shape before flattening so the outputs
# can be restored to it below.
shape = alpha.shape
alpha = alpha.flatten()
delta = delta.flatten()
inp = self.spherical2cartesian(alpha, delta)
matrix = self._create_matrix(phi, theta, psi, axes_order)
result = np.dot(matrix, inp)
a, b = self.cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
""" Input units. """
return {'alpha': u.deg, 'delta': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha': u.deg, 'delta': u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity`
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
inputs = ('alpha', 'delta')
outputs = ('alpha', 'delta')
phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ['x', 'y', 'z']
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3,"
"got {0}".format(axes_order))
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError("Unrecognized axis label {0}; "
"should be one of {1} ".format(unrecognized, self.axes))
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1])
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = 'zxz'
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole,
self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
elif mask:
# Scalar case: only wrap negative longitudes into [0, 360).
alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity`
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity`
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the native sphere
inputs = ('phi_N', 'theta_N')
#: Outputs are angles on the celestial sphere
outputs = ('alpha_C', 'delta_C')
@property
def input_units(self):
""" Input units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles in the Native coordinate system.
lon, lat, lon_pole : float (in deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles on the Celestial sphere.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = - (np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity`
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity`
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the celestial sphere
inputs = ('alpha_C', 'delta_C')
#: Outputs are angles on the native sphere
outputs = ('phi_N', 'theta_N')
@property
def input_units(self):
""" Input units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
@property
def return_units(self):
""" Output units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles in the Celestial coordinate frame.
lon, lat, lon_pole : float (deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles on the Native sphere.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = (np.pi / 2 + lon)
theta = (np.pi / 2 - lat)
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation and vice-versa.
Parameters
----------
angle : float or `~astropy.units.Quantity`
Angle of rotation (if float it should be in deg).
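Examples
--------
A 90-degree counter-clockwise rotation of the point ``(1, 0)`` (a
minimal sketch; the tiny floating-point residue is rounded away)::
>>> import numpy as np
>>> model = Rotation2D(angle=90)
>>> x, y = model(1, 0)
>>> np.round(x, 10), np.round(y, 10)
(0.0, 1.0)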
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
_separable = False
angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : ndarray-like
Input quantities
angle : float (deg) or `~astropy.units.Quantity`
Angle of rotations.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
if isinstance(x, u.Quantity):
unit = x.unit
else:
unit = None
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.value
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if unit is not None:
return u.Quantity(x, unit=unit), u.Quantity(y, unit=unit)
else:
return x, y
@staticmethod
def _compute_matrix(angle):
return np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]],
dtype=np.float64)
|
dd29687979ebd4be5f0fabd789cc8869bdff040218caf737a120f039166003f2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides utility functions for the models package
"""
from collections import deque
from collections.abc import MutableMapping
from inspect import signature
import numpy as np
from ..utils import isiterable, check_broadcast
from ..utils.compat import NUMPY_LT_1_14
from .. import units as u
__all__ = ['ExpressionTree', 'AliasDict', 'check_broadcast',
'poly_map_domain', 'comb', 'ellipse_extent']
class ExpressionTree:
__slots__ = ['left', 'right', 'value', 'inputs', 'outputs']
def __init__(self, value, left=None, right=None, inputs=None, outputs=None):
self.value = value
self.inputs = inputs
self.outputs = outputs
self.left = left
# Two subtrees can't be the same *object* or else traverse_postorder
# breaks, so we always copy the right subtree to avoid that.
if right is not None and left is right:
right = right.copy()
self.right = right
def __getstate__(self):
# For some reason the default pickle protocol on Python 2 does not just
# do this. On Python 3 it's not a problem.
return dict((slot, getattr(self, slot)) for slot in self.__slots__)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, ExpressionTree):
return adict[key]
else:
return branch, key
@property
def inputs_map(self):
"""
Map the names of the inputs to this ExpressionTree to the inputs to the leaf models.
"""
inputs_map = {}
if not isinstance(self.value, str): # If we don't have an operator the mapping is trivial
return {inp: (self.value, inp) for inp in self.inputs}
elif self.value == '|':
for inp in self.inputs:
m, inp2 = self._recursive_lookup(self.left, self.left.inputs_map, inp)
inputs_map[inp] = m, inp2
elif self.value == '&':
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
m, inp2 = self._recursive_lookup(self.left,
self.left.inputs_map,
self.left.inputs[i])
inputs_map[inp] = m, inp2
else: # Get from right
m, inp2 = self._recursive_lookup(self.right,
self.right.inputs_map,
self.right.inputs[i - len(self.left.inputs)])
inputs_map[inp] = m, inp2
else:
for inp in self.left.inputs:
m, inp2 = self._recursive_lookup(self.left, self.left.inputs_map, inp)
inputs_map[inp] = m, inp2
return inputs_map
@property
def outputs_map(self):
"""
Map the names of the outputs to this ExpressionTree to the outputs to the leaf models.
"""
outputs_map = {}
if not isinstance(self.value, str): # If we don't have an operator the mapping is trivial
return {out: (self.value, out) for out in self.outputs}
elif self.value == '|':
for out in self.outputs:
m, out2 = self._recursive_lookup(self.right, self.right.outputs_map, out)
outputs_map[out] = m, out2
elif self.value == '&':
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
m, out2 = self._recursive_lookup(self.left,
self.left.outputs_map,
self.left.outputs[i])
outputs_map[out] = m, out2
else: # Get from right
m, out2 = self._recursive_lookup(self.right,
self.right.outputs_map,
self.right.outputs[i - len(self.left.outputs)])
outputs_map[out] = m, out2
else:
for out in self.left.outputs:
m, out2 = self._recursive_lookup(self.left, self.left.outputs_map, out)
outputs_map[out] = m, out2
return outputs_map
@property
def isleaf(self):
return self.left is None and self.right is None
def traverse_preorder(self):
stack = deque([self])
while stack:
node = stack.pop()
yield node
if node.right is not None:
stack.append(node.right)
if node.left is not None:
stack.append(node.left)
def traverse_inorder(self):
stack = deque()
node = self
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node
node = node.right
def traverse_postorder(self):
stack = deque([self])
last = None
while stack:
node = stack[-1]
if last is None or node is last.left or node is last.right:
if node.left is not None:
stack.append(node.left)
elif node.right is not None:
stack.append(node.right)
elif node.left is last and node.right is not None:
stack.append(node.right)
else:
yield stack.pop()
last = node
def evaluate(self, operators, getter=None, start=0, stop=None):
"""Evaluate the expression represented by this tree.
``Operators`` should be a dictionary mapping operator names ('tensor',
'product', etc.) to a function that implements that operator for the
correct number of operands.
If given, ``getter`` is a function evaluated on each *leaf* node's
value before applying the operator between them. This could be used,
for example, to operate on an attribute of the node values rather than
directly on the node values. The ``getter`` is passed both the index
of the leaf (a count starting at 0 that is incremented after each leaf
is found) and the leaf node itself.
The ``start`` and ``stop`` arguments allow evaluating a sub-expression
within the expression tree.
TODO: Document this better.
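A minimal sketch, with numeric leaves standing in for models::
>>> import operator
>>> t = ExpressionTree('+', left=ExpressionTree(1), right=ExpressionTree(2))
>>> t.evaluate({'+': operator.add})
3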
"""
stack = deque()
if getter is None:
getter = lambda idx, value: value
if start is None:
start = 0
leaf_idx = 0
for node in self.traverse_postorder():
if node.isleaf:
# For a "tree" containing just a single operator at the root
# Also push the index of this leaf onto the stack, which will
# prove useful for evaluating subexpressions
stack.append((getter(leaf_idx, node.value), leaf_idx))
leaf_idx += 1
else:
operator = operators[node.value]
if len(stack) < 2:
# Skip this operator if there are not enough operands on
# the stack; this can happen if some operands were skipped
# when evaluating a sub-expression
continue
right = stack.pop()
left = stack.pop()
operands = []
for operand in (left, right):
# idx is the leaf index; -1 if not a leaf node
if operand[-1] == -1:
operands.append(operand)
else:
operand, idx = operand
if start <= idx and (stop is None or idx < stop):
operands.append((operand, idx))
if len(operands) == 2:
# evaluate the operator with the given operands and place
# the result on the stack (with -1 for the "leaf index",
# since this result is not a leaf node).
left, right = operands
stack.append((operator(left[0], right[0]), -1))
elif len(operands) == 0:
# Just push the left one back on the stack
# TODO: Explain and/or refactor this better
# This is here because even if both operands were "skipped"
# due to being outside the (start, stop) range, we've only
# skipped one operator. But there should be at least 2
# operators involving these operands, so we push the one
# from the left back onto the stack so that the next
# operator will be skipped as well. Should probably come
# up with an easier-to-follow way to write this algorithm.
stack.append(left)
else:
# one or more of the operands was not included in the
# sub-expression slice, so don't evaluate the operator;
# instead place left over operands (if any) back on the
# stack for later use
stack.extend(operands)
return stack.pop()[0]
def copy(self):
# Hopefully this won't blow the stack for any practical case; if such a
# case arises that this won't work then I suppose we can find an
# iterative approach.
children = []
for child in (self.left, self.right):
if isinstance(child, ExpressionTree):
children.append(child.copy())
else:
children.append(child)
return self.__class__(self.value, left=children[0], right=children[1])
def format_expression(self, operator_precedence, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: '[{0}]'.format(i)
for node in self.traverse_postorder():
if node.isleaf:
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
oper_order = operator_precedence[node.value]
right = operands.pop()
left = operands.pop()
if (node.left is not None and not node.left.isleaf and
operator_precedence[node.left.value] < oper_order):
left = '({0})'.format(left)
if (node.right is not None and not node.right.isleaf and
operator_precedence[node.right.value] < oper_order):
right = '({0})'.format(right)
operands.append(' '.join((left, node.value, right)))
return ''.join(operands)
class AliasDict(MutableMapping):
"""
Creates a `dict` like object that wraps an existing `dict` or other
`MutableMapping`, along with a `dict` of *key aliases* that translate
between specific keys in this dict to different keys in the underlying
dict.
In other words, keys that do not have an associated alias are accessed and
stored like a normal `dict`. However, a key that has an alias is accessed
and stored to the "parent" dict via the alias.
Parameters
----------
parent : dict-like
The parent `dict` that aliased keys are accessed from and stored to.
aliases : dict-like
Maps keys in this dict to their associated keys in the parent dict.
Examples
--------
>>> parent = {'a': 1, 'b': 2, 'c': 3}
>>> aliases = {'foo': 'a', 'bar': 'c'}
>>> alias_dict = AliasDict(parent, aliases)
>>> alias_dict['foo']
1
>>> alias_dict['bar']
3
Keys in the original parent dict are not visible if they were not
aliased::
>>> alias_dict['b']
Traceback (most recent call last):
...
KeyError: 'b'
Likewise, updates to aliased keys are reflected back in the parent dict::
>>> alias_dict['foo'] = 42
>>> alias_dict['foo']
42
>>> parent['a']
42
However, updates/insertions to keys that are *not* aliased are not
reflected in the parent dict::
>>> alias_dict['qux'] = 99
>>> alias_dict['qux']
99
>>> 'qux' in parent
False
In particular, updates on the `AliasDict` to a key that is equal to
one of the aliased keys in the parent dict do *not* update the parent
dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But
assigning to a key ``'a'`` on the `AliasDict` does not impact the
parent::
>>> alias_dict['a'] = 'nope'
>>> alias_dict['a']
'nope'
>>> parent['a']
42
"""
_store_type = dict
"""
Subclasses may override this to use other mapping types as the underlying
storage, for example an `OrderedDict`. However, even in this case
additional work may be needed to get things like the ordering right.
"""
def __init__(self, parent, aliases):
self._parent = parent
self._store = self._store_type()
self._aliases = dict(aliases)
def __getitem__(self, key):
if key in self._aliases:
try:
return self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
return self._store[key]
def __setitem__(self, key, value):
if key in self._aliases:
self._parent[self._aliases[key]] = value
else:
self._store[key] = value
def __delitem__(self, key):
if key in self._aliases:
try:
del self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
else:
del self._store[key]
def __iter__(self):
"""
First iterates over keys from the parent dict (if the aliased keys are
present in the parent), followed by any keys in the local store.
"""
for key, alias in self._aliases.items():
if alias in self._parent:
yield key
for key in self._store:
yield key
def __len__(self):
# TODO:
# This could be done more efficiently, but at present the use case for
# it is narrow if non-existent.
return len(list(iter(self)))
def __repr__(self):
# repr() just like any other dict--this should look transparent
store_copy = self._store_type()
for key, alias in self._aliases.items():
if alias in self._parent:
store_copy[key] = self._parent[alias]
store_copy.update(self._store)
return repr(store_copy)
class _BoundingBox(tuple):
"""
Base class for models with custom bounding box templates (methods that
return an actual bounding box tuple given some adjustable parameters--see
for example `~astropy.modeling.models.Gaussian1D.bounding_box`).
On these classes the ``bounding_box`` property still returns a `tuple`
giving the default bounding box for that instance of the model. But that
tuple may also be a subclass of this class that is callable, and allows
a new tuple to be returned using a user-supplied value for any adjustable
parameters to the bounding box.
"""
_model = None
def __new__(cls, input_, _model=None):
self = super().__new__(cls, input_)
if _model is not None:
# Bind this _BoundingBox (most likely a subclass) to a Model
# instance so that its __call__ can access the model
self._model = _model
return self
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters.")
@classmethod
def validate(cls, model, bounding_box):
"""
Validate a given bounding box sequence against the given model (which
may be either a subclass of `~astropy.modeling.Model` or an instance
thereof), so long as the ``.inputs`` attribute is defined.
Currently this just checks that the bounding_box is either a 2-tuple
of lower and upper bounds for 1-D models, or an N-tuple of 2-tuples
for N-D models.
This also returns a normalized version of the bounding_box input to
ensure it is always an N-tuple (even for the 1-D case).
"""
nd = model.n_inputs
if nd == 1:
if (not isiterable(bounding_box)
or np.shape(bounding_box) not in ((2,), (1, 2))):
raise ValueError(
"Bounding box for {0} model must be a sequence of length "
"2 consisting of a lower and upper bound, or a 1-tuple "
"containing such a sequence as its sole element.".format(
model.name))
if len(bounding_box) == 1:
return cls((tuple(bounding_box[0]),))
else:
return cls(tuple(bounding_box))
else:
if (not isiterable(bounding_box)
or np.shape(bounding_box) != (nd, 2)):
raise ValueError(
"Bounding box for {0} model must be a sequence of length "
"{1} (the number of model inputs) consisting of pairs of "
"lower and upper bounds for those inputs on which to "
"evaluate the model.".format(model.name, nd))
return cls(tuple(bounds) for bounds in bounding_box)
def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` in the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
given operator is applied element-wise to the tuple outputs.
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: \
tuple(oper(x, y) for x, y in zip(f(inputs, params),
g(inputs, params)))
def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
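Examples
--------
Mapping the domain ``[1, 3]`` onto the window ``[-1, 1]`` (a minimal
sketch)::
>>> poly_map_domain([1, 2, 3], [1, 3], [-1, 1])  # doctest: +FLOAT_CMP
array([-1.,  0.,  1.])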
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx
def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
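Examples
--------
Choosing 2 items out of 5 (the result is a float because the running
product uses true division)::
>>> comb(5, 2)
10.0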
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val
def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
"""
sep = ',' if NUMPY_LT_1_14 else ', '
r = np.array2string(array, separator=sep, suppress_small=True)
return ' '.join(l.strip() for l in r.splitlines())
def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
However if *any* of the labels conflict, this appends '0' to the left-hand
labels and '1' to the right-hand labels so there is no ambiguity.
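Examples
--------
>>> combine_labels(('x', 'y'), ('z',))
('x', 'y', 'z')
>>> combine_labels(('x', 'y'), ('x',))
('x0', 'y0', 'x1')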
"""
if set(left).intersection(right):
left = tuple(l + '0' for l in left)
right = tuple(r + '1' for r in right)
return left + right
def ellipse_extent(a, b, theta):
"""
Calculates the extent of a box encapsulating a rotated 2D ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
Major axis.
b : float or `~astropy.units.Quantity`
Minor axis.
theta : float or `~astropy.units.Quantity`
Rotation angle. If given as a floating-point value, it is assumed to be
in radians.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi/4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy]))
else:
return np.abs([dx, dy])
def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
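Examples
--------
Arguments without defaults become inputs; keyword arguments become
parameters (a minimal sketch)::
>>> def sine(x, amplitude=1., frequency=1.):
...     pass
>>> inputs, params = get_inputs_and_params(sine)
>>> [p.name for p in inputs], [p.name for p in params]
(['x'], ['amplitude', 'frequency'])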
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params
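# Usage sketch with a hypothetical callable: positional arguments without
# defaults are treated as model inputs, keyword arguments as parameters
# (the convention used by e.g. ``custom_model``):
#
#     >>> def profile(x, amplitude=1., gamma=1.):
#     ...     return amplitude / (1 + (x / gamma) ** 2)
#     >>> inputs, params = get_inputs_and_params(profile)
#     >>> [p.name for p in inputs], [p.name for p in params]
#     (['x'], ['amplitude', 'gamma'])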
def _parameter_with_unit(parameter, unit):
if parameter.unit is None:
return parameter.value * unit
else:
return parameter.quantity.to(unit)
def _parameter_without_unit(value, old_unit, new_unit):
if old_unit is None:
return value
else:
return value * old_unit.to(new_unit)
def _combine_equivalency_dict(keys, eq1=None, eq2=None):
# Given two dictionaries that give equivalencies for a set of keys, for
# example input value names, return a dictionary that includes all the
# equivalencies
eq = {}
for key in keys:
eq[key] = []
if eq1 is not None and key in eq1:
eq[key].extend(eq1[key])
if eq2 is not None and key in eq2:
eq[key].extend(eq2[key])
return eq
def _to_radian(value):
""" Convert ``value`` to radian. """
if isinstance(value, u.Quantity):
return value.to(u.rad)
else:
return np.deg2rad(value)
def _to_orig_unit(value, raw_unit=None, orig_unit=None):
""" Convert value with ``raw_unit`` to ``orig_unit``. """
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
else:
return np.rad2deg(value)
|
715f5d445c55e6b90fa4a37db23c1bde9476283b99669c986e6a43a02a5bdf46 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
from collections import OrderedDict
import numpy as np
from .core import (Fittable1DModel, Fittable2DModel,
ModelDefinitionError)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
from ..stats.funcs import gaussian_sigma_to_fwhm
from .. import units as u
from ..units import Quantity, UnitsError
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D',
'Gaussian2D', 'Linear1D', 'Lorentz1D',
'MexicanHat1D', 'MexicanHat2D', 'Planar2D', 'RedshiftScaleFactor',
'Scale', 'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Trapezoid1D',
'TrapezoidDisk2D', 'Ring2D', 'Voigt1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
mean : float
Mean of the Gaussian.
stddev : float
Standard deviation of the Gaussian.
Notes
-----
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1)
mean = Parameter(default=0)
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * gaussian_sigma_to_fwhm
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
else:
return {'x': self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('mean', inputs_unit['x']),
('stddev', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
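# Quick numeric check (sketch; outputs shown as plain floats): with
# stddev = 2 the FWHM is 2 * sqrt(2 ln 2) * 2 ~= 4.71 and the default
# 5.5-sigma bounding box spans +/- 11:
#
#     >>> g = Gaussian1D(amplitude=1., mean=0., stddev=2.)
#     >>> g.fwhm  # doctest: +SKIP
#     4.70964...
#     >>> g.bounding_box  # doctest: +SKIP
#     (-11.0, 11.0)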
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
x_mean : float
Mean of the Gaussian in x.
y_mean : float
Mean of the Gaussian in y.
x_stddev : float or None
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or None
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float, optional
Rotation angle in radians. The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1)
x_mean = Parameter(default=0)
y_mean = Parameter(default=0)
x_stddev = Parameter(default=1)
y_stddev = Parameter(default=1)
theta = Parameter(default=0.0)
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
else:
# Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
# TODO: Maybe it should be possible for the covariance matrix
# to be some (x, y, ..., z, 2, 2) array to be broadcast with
# other parameters of shape (x, y, ..., z)
# But that's maybe a special case to work out if/when needed
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * gaussian_sigma_to_fwhm
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * gaussian_sigma_to_fwhm
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
else:
return {'x': self.x_mean.unit,
'y': self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_mean', inputs_unit['x']),
('y_mean', inputs_unit['x']),
('x_stddev', inputs_unit['x']),
('y_stddev', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
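# Sketch of the cov_matrix branch above: a diagonal covariance
# [[4, 0], [0, 1]] has axis-aligned eigenvectors, so it decomposes into
# x_stddev = 2, y_stddev = 1 and theta = 0:
#
#     >>> g = Gaussian2D(amplitude=1., x_mean=0., y_mean=0.,
#     ...                cov_matrix=[[4., 0.], [0., 1.]])
#     >>> g.x_stddev.value, g.y_stddev.value, g.theta.value  # doctest: +SKIP
#     (2.0, 1.0, 0.0)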
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
inputs = ('x',)
outputs = ('x',)
offset = Parameter(default=0)
linear = True
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
if self.offset.unit is None:
return None
else:
return {'x': self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
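# Usage sketch: Shift adds its offset and its analytic inverse simply
# negates it:
#
#     >>> s = Shift(offset=2.)
#     >>> float(s(1.)), float(s.inverse(3.))
#     (3.0, 1.0)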
class Scale(Fittable1DModel):
"""
Multiply a model by a factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
"""
inputs = ('x',)
outputs = ('x',)
factor = Parameter(default=1)
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
if self.factor.unit is None:
return None
else:
return {'x': self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='redshift', default=0)
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
return inv
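# Usage sketch: z = 1 doubles the input, and the inverse redshift solves
# (1 + z')(1 + z) = 1, i.e. z' = -0.5 here, so a round trip is exact:
#
#     >>> m = RedshiftScaleFactor(z=1.)
#     >>> float(m(1000.)), float(m.inverse(m(1000.)))
#     (2000.0, 1000.0)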
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Central surface brightness, within r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ImportError:
raise ImportError('Sersic1D model requires scipy > 0.11.')
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
else:
return {'x': self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('r_eff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1)
frequency = Parameter(default=1)
phase = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
else:
return {'x': 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('frequency', inputs_unit['x'] ** -1),
('amplitude', outputs_unit['y'])])
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, slope, intercept):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
else:
return {'x': self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['y']),
('slope', outputs_unit['y'] / inputs_unit['x'])])
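# Usage sketch: the analytic inverse of y = 2 x + 1 is x = 0.5 y - 0.5,
# so composing the two recovers the input:
#
#     >>> line = Linear1D(slope=2., intercept=1.)
#     >>> float(line.inverse(line(3.)))
#     3.0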
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the straight line in X
slope_y : float
Slope of the straight line in Y
intercept : float
Z-intercept of the straight line
See Also
--------
Linear1D, Polynomial2D
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1)
slope_y = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
See Also
--------
Gaussian1D, Box1D, MexicanHat1D
Notes
-----
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
fwhm = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
# Use the same half-width, (fwhm / 2), as ``evaluate`` above so that the
# derivatives stay consistent with the model function.
d_amplitude = (fwhm / 2.) ** 2 / ((fwhm / 2.) ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
         ((fwhm / 2.) ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float
Position of the peak
amplitude_L : float
The Lorentzian amplitude
fwhm_L : float
The Lorentzian full width at half maximum
fwhm_G : float
The Gaussian full width at half maximum
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Algorithm for the computation taken from
McLean, A. B., Mitchell, C. E. J. & Swanston, D. M. Implementation of an
efficient analytical approximation to the Voigt function for photoemission
lineshape analysis. Journal of Electron Spectroscopy and Related Phenomena
69, 125-132 (1994)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0)
amplitude_L = Parameter(default=1)
fwhm_L = Parameter(default=2/np.pi)
fwhm_G = Parameter(default=np.log(2))
_abcd = np.array([
[-1.2150, -1.3509, -1.2150, -1.3509], # A
[1.2359, 0.3786, -1.2359, -0.3786], # B
[-0.3085, 0.5906, -0.3085, 0.5906], # C
[0.0210, -1.1858, -0.0210, 1.1858]]) # D
@classmethod
def evaluate(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[..., np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[..., np.newaxis]
V = np.sum((C * (Y - A) + D * (X - B))/(((Y - A) ** 2 + (X - B) ** 2)), axis=-1)
return (fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G) * V
@classmethod
def fit_deriv(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[:, np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[:, np.newaxis]
constant = fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G
alpha = C * (Y - A) + D * (X - B)
beta = (Y - A) ** 2 + (X - B) ** 2
V = np.sum((alpha / beta), axis=-1)
dVdx = np.sum((D/beta - 2 * (X - B) * alpha / np.square(beta)), axis=-1)
dVdy = np.sum((C/beta - 2 * (Y - A) * alpha / np.square(beta)), axis=-1)
dyda = [-constant * dVdx * 2 * sqrt_ln2 / fwhm_G,
constant * V / amplitude_L,
constant * (V / fwhm_L + dVdy * sqrt_ln2 / fwhm_G),
-constant * (V + (sqrt_ln2 / fwhm_G) * (2 * (x - x_0) * dVdx + fwhm_L * dVdy)) / fwhm_G]
return dyda
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm_L', inputs_unit['x']),
('fwhm_G', inputs_unit['x']),
('amplitude_L', outputs_unit['y'])])
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['y'])])
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['z'])])
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
a = Parameter(default=1)
b = Parameter(default=1)
theta = Parameter(default=0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('a', inputs_unit['x']),
('b', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer radius of the ring. Can be specified instead of ``width``.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + \\mathrm{width}`.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_in', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
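# Usage sketch of the r_out shortcut in __init__: only the difference is
# stored, so r_in = 2, r_out = 5 becomes width = 3 (passing both a
# non-default width and r_out raises InputParameterError):
#
#     >>> ring = Ring2D(r_in=2., r_out=5.)
#     >>> float(ring.width.value)
#     3.0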
class Delta1D(Fittable1DModel):
"""One dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Delta2D(Fittable2DModel):
"""Two dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
result = np.select([inside], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@classmethod
def fit_deriv(cls, x, amplitude, x_0, width):
"""One dimensional Box model derivative with respect to parameters"""
d_amplitude = cls.evaluate(x, 1, x_0, width)
d_x_0 = np.zeros_like(x)
d_width = np.zeros_like(x)
return [d_amplitude, d_x_0, d_width]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
x_width = Parameter(default=1)
y_width = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['y']),
('x_width', inputs_unit['x']),
('y_width', inputs_unit['y']),
('amplitude', outputs_unit['z'])])
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('slope', outputs_unit['y'] / inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('slope', outputs_unit['z'] / inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class MexicanHat1D(Fittable1DModel):
"""
One dimensional Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import MexicanHat1D
plt.figure()
s1 = MexicanHat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Mexican Hat model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class MexicanHat2D(Fittable2DModel):
"""
Two dimensional symmetric Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Mexican Hat model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
radius = Parameter(default=1)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
except ImportError:
raise ImportError('AiryDisk2D model requires scipy > 0.11.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('radius', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
d_A = (1 + (x - x_0) ** 2 / gamma ** 2) ** (-alpha)
# The chain rule on (1 + u) ** -alpha leaves one extra factor of (1 + u)
# in the denominator, matching the analogous terms in Moffat2D.fit_deriv.
d_x_0 = (-amplitude * alpha * d_A * (-2 * x + 2 * x_0) /
         (gamma ** 2 * (1 + (x - x_0) ** 2 / gamma ** 2)))
d_gamma = (2 * amplitude * alpha * d_A * (x - x_0) ** 2 /
           (gamma ** 3 * (1 + (x - x_0) ** 2 / gamma ** 2)))
d_alpha = -amplitude * d_A * np.log(1 + (x - x_0) ** 2 / gamma ** 2)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
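# Quick numeric check (sketch): gamma = 2, alpha = 1 gives
# fwhm = 2 * gamma * sqrt(2**(1/alpha) - 1) = 4, and the profile indeed
# falls to half amplitude at x_0 + fwhm / 2:
#
#     >>> m = Moffat1D(amplitude=1., x_0=0., gamma=2., alpha=1.)
#     >>> float(m.fwhm), float(m(2.))
#     (4.0, 0.5)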
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (-amplitude * alpha * d_A * (-2 * x + 2 * x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (-amplitude * alpha * d_A * (-2 * y + 2 * y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * (rr_gg / (gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Central surface brightness, within r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
ellip = Parameter(default=0)
theta = Parameter(default=0)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ImportError:
raise ImportError('Sersic2D model requires scipy > 0.11.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_eff', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
|
0be7bb717009d21d028ee1183c22b990b07a37a3c6841366af988f4e937e0056 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees but internally the computations
are performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
import numpy as np
from .core import Model
from .parameters import Parameter, InputParameterError
from .. import units as u
from . import _projections
from .utils import _to_radian, _to_orig_unit
projcodes = [
'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
]
__all__ = ['Projection', 'Pix2SkyProjection', 'Sky2PixProjection',
'Zenithal', 'Cylindrical', 'PseudoCylindrical', 'Conic',
'PseudoConic', 'QuadCube', 'HEALPix',
'AffineTransformation2D',
'projcodes',
'Pix2Sky_ZenithalPerspective', 'Sky2Pix_ZenithalPerspective',
'Pix2Sky_SlantZenithalPerspective', 'Sky2Pix_SlantZenithalPerspective',
'Pix2Sky_Gnomonic', 'Sky2Pix_Gnomonic',
'Pix2Sky_Stereographic', 'Sky2Pix_Stereographic',
'Pix2Sky_SlantOrthographic', 'Sky2Pix_SlantOrthographic',
'Pix2Sky_ZenithalEquidistant', 'Sky2Pix_ZenithalEquidistant',
'Pix2Sky_ZenithalEqualArea', 'Sky2Pix_ZenithalEqualArea',
'Pix2Sky_Airy', 'Sky2Pix_Airy',
'Pix2Sky_CylindricalPerspective', 'Sky2Pix_CylindricalPerspective',
'Pix2Sky_CylindricalEqualArea', 'Sky2Pix_CylindricalEqualArea',
'Pix2Sky_PlateCarree', 'Sky2Pix_PlateCarree',
'Pix2Sky_Mercator', 'Sky2Pix_Mercator',
'Pix2Sky_SansonFlamsteed', 'Sky2Pix_SansonFlamsteed',
'Pix2Sky_Parabolic', 'Sky2Pix_Parabolic',
'Pix2Sky_Molleweide', 'Sky2Pix_Molleweide',
'Pix2Sky_HammerAitoff', 'Sky2Pix_HammerAitoff',
'Pix2Sky_ConicPerspective', 'Sky2Pix_ConicPerspective',
'Pix2Sky_ConicEqualArea', 'Sky2Pix_ConicEqualArea',
'Pix2Sky_ConicEquidistant', 'Sky2Pix_ConicEquidistant',
'Pix2Sky_ConicOrthomorphic', 'Sky2Pix_ConicOrthomorphic',
'Pix2Sky_BonneEqualArea', 'Sky2Pix_BonneEqualArea',
'Pix2Sky_Polyconic', 'Sky2Pix_Polyconic',
'Pix2Sky_TangentialSphericalCube', 'Sky2Pix_TangentialSphericalCube',
'Pix2Sky_COBEQuadSphericalCube', 'Sky2Pix_COBEQuadSphericalCube',
'Pix2Sky_QuadSphericalCube', 'Sky2Pix_QuadSphericalCube',
'Pix2Sky_HEALPix', 'Sky2Pix_HEALPix',
'Pix2Sky_HEALPixPolar', 'Sky2Pix_HEALPixPolar',
# The following are short FITS WCS aliases
'Pix2Sky_AZP', 'Sky2Pix_AZP',
'Pix2Sky_SZP', 'Sky2Pix_SZP',
'Pix2Sky_TAN', 'Sky2Pix_TAN',
'Pix2Sky_STG', 'Sky2Pix_STG',
'Pix2Sky_SIN', 'Sky2Pix_SIN',
'Pix2Sky_ARC', 'Sky2Pix_ARC',
'Pix2Sky_ZEA', 'Sky2Pix_ZEA',
'Pix2Sky_AIR', 'Sky2Pix_AIR',
'Pix2Sky_CYP', 'Sky2Pix_CYP',
'Pix2Sky_CEA', 'Sky2Pix_CEA',
'Pix2Sky_CAR', 'Sky2Pix_CAR',
'Pix2Sky_MER', 'Sky2Pix_MER',
'Pix2Sky_SFL', 'Sky2Pix_SFL',
'Pix2Sky_PAR', 'Sky2Pix_PAR',
'Pix2Sky_MOL', 'Sky2Pix_MOL',
'Pix2Sky_AIT', 'Sky2Pix_AIT',
'Pix2Sky_COP', 'Sky2Pix_COP',
'Pix2Sky_COE', 'Sky2Pix_COE',
'Pix2Sky_COD', 'Sky2Pix_COD',
'Pix2Sky_COO', 'Sky2Pix_COO',
'Pix2Sky_BON', 'Sky2Pix_BON',
'Pix2Sky_PCO', 'Sky2Pix_PCO',
'Pix2Sky_TSC', 'Sky2Pix_TSC',
'Pix2Sky_CSC', 'Sky2Pix_CSC',
'Pix2Sky_QSC', 'Sky2Pix_QSC',
'Pix2Sky_HPX', 'Sky2Pix_HPX',
'Pix2Sky_XPH', 'Sky2Pix_XPH'
]
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
inputs = ('x', 'y')
outputs = ('phi', 'theta')
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
return {'x': u.deg, 'y': u.deg}
@property
def return_units(self):
return {'phi': u.deg, 'theta': u.deg}
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
inputs = ('phi', 'theta')
outputs = ('x', 'y')
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
return {'phi': u.deg, 'theta': u.deg}
@property
def return_units(self):
return {'x': u.deg, 'y': u.deg}
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
_separable = False
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
    ----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = Parameter(default=0.0)
gamma = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, mu=mu.default, gamma=gamma.default, **kwargs):
# units : mu - in spherical radii, gamma - in deg
# TODO: Support quantity objects here and in similar contexts
super().__init__(mu, gamma, **kwargs)
@mu.validator
def mu(self, value):
if np.any(value == -1):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
@property
def inverse(self):
return Sky2Pix_ZenithalPerspective(self.mu.value, self.gamma.value)
@classmethod
def evaluate(cls, x, y, mu, gamma):
return _projections.azpx2s(x, y, mu, _to_orig_unit(gamma))
Pix2Sky_AZP = Pix2Sky_ZenithalPerspective
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = Parameter(default=0.0)
gamma = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
@mu.validator
def mu(self, value):
if np.any(value == -1):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
@property
def inverse(self):
return Pix2Sky_AZP(self.mu.value, self.gamma.value)
@classmethod
def evaluate(cls, phi, theta, mu, gamma):
return _projections.azps2x(
phi, theta, mu, _to_orig_unit(gamma))
Sky2Pix_AZP = Sky2Pix_ZenithalPerspective
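# An illustrative sketch of the ``mu`` validators above: assigning the
# singular value mu = -1 is rejected (assuming, as elsewhere in
# astropy.modeling, that parameter validators fire on assignment).
def _example_azp_mu_validation():
    m = Pix2Sky_ZenithalPerspective()
    try:
        m.mu = -1.0
    except InputParameterError:
        pass  # expected: AZP is undefined for mu = -1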
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
    ----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
def _validate_mu(mu):
if np.asarray(mu == -1).any():
raise ValueError(
"Zenithal perspective projection is not defined for mu=-1")
return mu
mu = Parameter(default=0.0, setter=_validate_mu)
phi0 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
theta0 = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
return Sky2Pix_SlantZenithalPerspective(
self.mu.value, self.phi0.value, self.theta0.value)
@classmethod
def evaluate(cls, x, y, mu, phi0, theta0):
return _projections.szpx2s(
x, y, mu, _to_orig_unit(phi0), _to_orig_unit(theta0))
Pix2Sky_SZP = Pix2Sky_SlantZenithalPerspective
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
    Slant zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
def _validate_mu(mu):
if np.asarray(mu == -1).any():
raise ValueError("Zenithal perspective projection is not defined for mu=-1")
return mu
mu = Parameter(default=0.0, setter=_validate_mu)
phi0 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
    theta0 = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
return Pix2Sky_SlantZenithalPerspective(
self.mu.value, self.phi0.value, self.theta0.value)
@classmethod
def evaluate(cls, phi, theta, mu, phi0, theta0):
return _projections.szps2x(
phi, theta, mu, _to_orig_unit(phi0), _to_orig_unit(theta0))
Sky2Pix_SZP = Sky2Pix_SlantZenithalPerspective
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
"""
@property
def inverse(self):
return Sky2Pix_Gnomonic()
@classmethod
def evaluate(cls, x, y):
return _projections.tanx2s(x, y)
Pix2Sky_TAN = Pix2Sky_Gnomonic
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
@property
def inverse(self):
return Pix2Sky_Gnomonic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.tans2x(phi, theta)
Sky2Pix_TAN = Sky2Pix_Gnomonic
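# An illustrative round-trip sketch for the TAN pair above, calling the
# ``evaluate`` classmethods directly (degrees in, degrees out).
def _example_tan_round_trip():
    phi, theta = Pix2Sky_Gnomonic.evaluate(3.0, 4.0)
    x, y = Sky2Pix_Gnomonic.evaluate(phi, theta)
    assert np.allclose((x, y), (3.0, 4.0))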
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
@property
def inverse(self):
return Sky2Pix_Stereographic()
@classmethod
def evaluate(cls, x, y):
return _projections.stgx2s(x, y)
Pix2Sky_STG = Pix2Sky_Stereographic
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
@property
def inverse(self):
return Pix2Sky_Stereographic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.stgs2x(phi, theta)
Sky2Pix_STG = Sky2Pix_Stereographic
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = Parameter(default=0.0)
eta = Parameter(default=0.0)
@property
def inverse(self):
return Sky2Pix_SlantOrthographic(self.xi.value, self.eta.value)
@classmethod
def evaluate(cls, x, y, xi, eta):
return _projections.sinx2s(x, y, xi, eta)
Pix2Sky_SIN = Pix2Sky_SlantOrthographic
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
    More specifically, the full transformations are:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = Parameter(default=0.0)
eta = Parameter(default=0.0)
@property
def inverse(self):
return Pix2Sky_SlantOrthographic(self.xi.value, self.eta.value)
@classmethod
def evaluate(cls, phi, theta, xi, eta):
return _projections.sins2x(phi, theta, xi, eta)
Sky2Pix_SIN = Sky2Pix_SlantOrthographic
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
@property
def inverse(self):
return Sky2Pix_ZenithalEquidistant()
@classmethod
def evaluate(cls, x, y):
return _projections.arcx2s(x, y)
Pix2Sky_ARC = Pix2Sky_ZenithalEquidistant
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
@property
def inverse(self):
return Pix2Sky_ZenithalEquidistant()
@classmethod
def evaluate(cls, phi, theta):
return _projections.arcs2x(phi, theta)
Sky2Pix_ARC = Sky2Pix_ZenithalEquidistant
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
    Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
@property
def inverse(self):
return Sky2Pix_ZenithalEqualArea()
@classmethod
def evaluate(cls, x, y):
return _projections.zeax2s(x, y)
Pix2Sky_ZEA = Pix2Sky_ZenithalEqualArea
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
    Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
@property
def inverse(self):
return Pix2Sky_ZenithalEqualArea()
@classmethod
def evaluate(cls, phi, theta):
return _projections.zeas2x(phi, theta)
Sky2Pix_ZEA = Sky2Pix_ZenithalEqualArea
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = Parameter(default=90.0)
@property
def inverse(self):
return Sky2Pix_Airy(self.theta_b.value)
@classmethod
def evaluate(cls, x, y, theta_b):
return _projections.airx2s(x, y, theta_b)
Pix2Sky_AIR = Pix2Sky_Airy
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} + \frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = Parameter(default=90.0)
@property
def inverse(self):
return Pix2Sky_Airy(self.theta_b.value)
@classmethod
def evaluate(cls, phi, theta, theta_b):
return _projections.airs2x(phi, theta, theta_b)
Sky2Pix_AIR = Sky2Pix_Airy
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = Parameter(default=1.0)
lam = Parameter(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
@property
def inverse(self):
return Sky2Pix_CylindricalPerspective(self.mu.value, self.lam.value)
@classmethod
def evaluate(cls, x, y, mu, lam):
return _projections.cypx2s(x, y, mu, lam)
Pix2Sky_CYP = Pix2Sky_CylindricalPerspective
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = Parameter(default=1.0)
lam = Parameter(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
@property
def inverse(self):
return Pix2Sky_CylindricalPerspective(self.mu, self.lam)
@classmethod
def evaluate(cls, phi, theta, mu, lam):
return _projections.cyps2x(phi, theta, mu, lam)
Sky2Pix_CYP = Sky2Pix_CylindricalPerspective
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = Parameter(default=1)
@property
def inverse(self):
return Sky2Pix_CylindricalEqualArea(self.lam)
@classmethod
def evaluate(cls, x, y, lam):
return _projections.ceax2s(x, y, lam)
Pix2Sky_CEA = Pix2Sky_CylindricalEqualArea
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = Parameter(default=1)
@property
def inverse(self):
return Pix2Sky_CylindricalEqualArea(self.lam)
@classmethod
def evaluate(cls, phi, theta, lam):
return _projections.ceas2x(phi, theta, lam)
Sky2Pix_CEA = Sky2Pix_CylindricalEqualArea
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@property
def inverse(self):
return Sky2Pix_PlateCarree()
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x, copy=True)
theta = np.array(y, copy=True)
return phi, theta
Pix2Sky_CAR = Pix2Sky_PlateCarree
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@property
def inverse(self):
return Pix2Sky_PlateCarree()
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi, copy=True)
y = np.array(theta, copy=True)
return x, y
Sky2Pix_CAR = Sky2Pix_PlateCarree
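# Illustrative: CAR is the identity in degrees, so evaluation is a
# straight pass-through of the inputs.
def _example_car_identity():
    phi, theta = Pix2Sky_PlateCarree.evaluate(12.5, -30.0)
    assert float(phi) == 12.5 and float(theta) == -30.0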
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
@property
def inverse(self):
return Sky2Pix_Mercator()
@classmethod
def evaluate(cls, x, y):
return _projections.merx2s(x, y)
Pix2Sky_MER = Pix2Sky_Mercator
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
@property
def inverse(self):
return Pix2Sky_Mercator()
@classmethod
def evaluate(cls, phi, theta):
return _projections.mers2x(phi, theta)
Sky2Pix_MER = Sky2Pix_Mercator
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
@property
def inverse(self):
return Sky2Pix_SansonFlamsteed()
@classmethod
def evaluate(cls, x, y):
return _projections.sflx2s(x, y)
Pix2Sky_SFL = Pix2Sky_SansonFlamsteed
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
@property
def inverse(self):
return Pix2Sky_SansonFlamsteed()
@classmethod
def evaluate(cls, phi, theta):
return _projections.sfls2x(phi, theta)
Sky2Pix_SFL = Sky2Pix_SansonFlamsteed
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
\phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Parabolic()
@classmethod
def evaluate(cls, x, y):
return _projections.parx2s(x, y)
Pix2Sky_PAR = Pix2Sky_Parabolic
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Parabolic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.pars2x(phi, theta)
Sky2Pix_PAR = Sky2Pix_Parabolic
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right) + \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Molleweide()
@classmethod
def evaluate(cls, x, y):
return _projections.molx2s(x, y)
Pix2Sky_MOL = Pix2Sky_Molleweide
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Molleweide()
@classmethod
def evaluate(cls, phi, theta):
return _projections.mols2x(phi, theta)
Sky2Pix_MOL = Sky2Pix_Molleweide
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_HammerAitoff()
@classmethod
def evaluate(cls, x, y):
return _projections.aitx2s(x, y)
Pix2Sky_AIT = Pix2Sky_HammerAitoff
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_HammerAitoff()
@classmethod
def evaluate(cls, phi, theta):
return _projections.aits2x(phi, theta)
Sky2Pix_AIT = Sky2Pix_HammerAitoff
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = False
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicPerspective(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.copx2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COP = Pix2Sky_ConicPerspective
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicPerspective(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.cops2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COP = Sky2Pix_ConicPerspective
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
Alber's conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicEqualArea(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.coex2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COE = Pix2Sky_ConicEqualArea
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
Alber's conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicEqualArea(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.coes2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COE = Sky2Pix_ConicEqualArea
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicEquidistant(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.codx2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COD = Pix2Sky_ConicEquidistant
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicEquidistant(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.cods2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COD = Sky2Pix_ConicEquidistant
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicOrthomorphic(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.coox2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COO = Pix2Sky_ConicOrthomorphic
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicOrthomorphic(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.coos2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COO = Sky2Pix_ConicOrthomorphic
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
theta1 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = True
@property
def inverse(self):
return Sky2Pix_BonneEqualArea(self.theta1.value)
@classmethod
def evaluate(cls, x, y, theta1):
return _projections.bonx2s(x, y, _to_orig_unit(theta1))
Pix2Sky_BON = Pix2Sky_BonneEqualArea
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
theta1 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = True
@property
def inverse(self):
return Pix2Sky_BonneEqualArea(self.theta1.value)
@classmethod
def evaluate(cls, phi, theta, theta1):
return _projections.bons2x(phi, theta,
_to_orig_unit(theta1))
Sky2Pix_BON = Sky2Pix_BonneEqualArea
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Polyconic()
@classmethod
def evaluate(cls, x, y):
return _projections.pcox2s(x, y)
Pix2Sky_PCO = Pix2Sky_Polyconic
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Polyconic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.pcos2x(phi, theta)
Sky2Pix_PCO = Sky2Pix_Polyconic
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
              0
        4 3 2 1 4 3 2
              5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_TangentialSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.tscx2s(x, y)
Pix2Sky_TSC = Pix2Sky_TangentialSphericalCube
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
    Corresponds to the ``TSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_TangentialSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.tscs2x(phi, theta)
Sky2Pix_TSC = Sky2Pix_TangentialSphericalCube
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_COBEQuadSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.cscx2s(x, y)
Pix2Sky_CSC = Pix2Sky_COBEQuadSphericalCube
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_COBEQuadSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.cscs2x(phi, theta)
Sky2Pix_CSC = Sky2Pix_COBEQuadSphericalCube
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_QuadSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.qscx2s(x, y)
Pix2Sky_QSC = Pix2Sky_QuadSphericalCube
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_QuadSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.qscs2x(phi, theta)
Sky2Pix_QSC = Sky2Pix_QuadSphericalCube
class HEALPix(Projection):
r"""Base class for HEALPix projections.
"""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = Parameter(default=4.0)
X = Parameter(default=3.0)
@property
def inverse(self):
return Sky2Pix_HEALPix(self.H.value, self.X.value)
@classmethod
def evaluate(cls, x, y, H, X):
return _projections.hpxx2s(x, y, H, X)
Pix2Sky_HPX = Pix2Sky_HEALPix
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = Parameter(default=4.0)
X = Parameter(default=3.0)
@property
def inverse(self):
return Pix2Sky_HEALPix(self.H.value, self.X.value)
@classmethod
def evaluate(cls, phi, theta, H, X):
return _projections.hpxs2x(phi, theta, H, X)
Sky2Pix_HPX = Sky2Pix_HEALPix
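# An illustrative round trip for the HPX pair with the default facet
# layout (H=4 facets in longitude, X=3 in latitude).
def _example_hpx_round_trip():
    x, y = Sky2Pix_HEALPix.evaluate(30.0, 45.0, 4.0, 3.0)
    phi, theta = Pix2Sky_HEALPix.evaluate(x, y, 4.0, 3.0)
    assert np.allclose((phi, theta), (30.0, 45.0))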
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
        return Sky2Pix_HEALPixPolar()
@classmethod
def evaluate(cls, x, y):
return _projections.xphx2s(x, y)
Pix2Sky_XPH = Pix2Sky_HEALPixPolar
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
    HEALPix polar, aka "butterfly" projection - sky to pixel.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
        return Pix2Sky_HEALPixPolar()
@classmethod
def evaluate(cls, phi, theta):
        return _projections.xphs2x(phi, theta)
Sky2Pix_XPH = Sky2Pix_HEALPixPolar
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given as either a 2x1 or 1x2 array) specifying a
translation to apply to the inputs
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array")
@translation.validator
def translation(self, value):
"""
Validates that the translation vector is a 2D vector. This allows
either a "row" vector or a "column" vector where in the latter case the
resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
"""
if not ((np.ndim(value) == 1 and np.shape(value) == (2,)) or
(np.ndim(value) == 2 and np.shape(value) == (1, 2))):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
"Transformation matrix is singular; {0} model does not "
"have an inverse".format(self.__class__.__name__))
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
        two lists--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
inarr = np.vstack([x.flatten(), y.flatten(), np.ones(x.size)])
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
if not all([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
raise ValueError("To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities.")
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
else:
return augmented_matrix
@property
def input_units(self):
if self.translation.unit is None and self.matrix.unit is None:
return None
elif self.translation.unit is not None:
return {'x': self.translation.unit,
'y': self.translation.unit
}
else:
return {'x': self.matrix.unit,
'y': self.matrix.unit
}
|
8eeb50b87e289bb27f4a348c410d5cd4d8168dc0868c131dc3f607939a85fde3 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
import abc
import contextlib
import difflib
import inspect
import json
import os
import signal
import sys
import traceback
import unicodedata
import locale
import threading
import re
import urllib.request
from itertools import zip_longest
from contextlib import contextmanager
from collections import defaultdict, OrderedDict
__all__ = ['isiterable', 'silence', 'format_exception', 'NumpyRNGContext',
'find_api_page', 'is_path_hidden', 'walk_skip_hidden',
'JsonCustomEncoder', 'indent', 'InheritDocstrings',
'OrderedDescriptor', 'OrderedDescriptorContainer', 'set_locale',
'ShapedLikeNDArray', 'check_broadcast', 'IncompatibleShapeError',
'dtype_bytes_or_chars']
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = '\n'.join(' ' * (width * shift) + l if l else ''
for l in s.splitlines())
    if s and s[-1] == '\n':
indented += '\n'
return indented
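# Illustrative: with the defaults, each non-empty line gains four spaces.
def _example_indent():
    assert indent('a\nb') == '    a\n    b'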
class _DummyFile:
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
def format_exception(msg, *args, **kwargs):
"""
Given an exception message string, uses new-style formatting arguments
``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
    information about the exception that occurred. For example::

        try:
            1/0
        except:
            raise ZeroDivisionError(
                format_exception('A divide by zero occurred in {filename} at '
                                 'line {lineno} of function {func}.'))
Any additional positional or keyword arguments passed to this function are
also used to format the message.
.. note::
This uses `sys.exc_info` to gather up the information needed to fill
in the formatting arguments. Since `sys.exc_info` is not carried
outside a handled exception, it's not wise to use this
outside of an ``except`` clause - if it is, this will substitute
    '<unknown>' for the 4 formatting arguments.
"""
tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
if len(tb) > 0:
filename, lineno, func, text = tb[0]
else:
filename = lineno = func = text = '<unknown>'
return msg.format(*args, filename=filename, lineno=lineno, func=func,
text=text, **kwargs)
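# An illustrative sketch of format_exception used inside an except clause:
def _example_format_exception():
    try:
        1 / 0
    except ZeroDivisionError:
        msg = format_exception(
            'A divide by zero occurred in {filename} at line {lineno}')
        assert '{lineno}' not in msg   # the line number was filled in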
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
    This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
#Any code using numpy.random at this indent level will act just as it
#would have if it had been before the with statement - e.g. whatever
#the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
optionally open that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
        location that should be the *base* of the documentation. Defaults to
        'dev' if you aren't on a release version; otherwise, defaults to the
        version you are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
"""
import webbrowser
from zlib import decompress
if (not isinstance(obj, str) and
hasattr(obj, '__module__') and
hasattr(obj, '__name__')):
obj = obj.__module__ + '.' + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from .. import version
if version.release:
version = 'v' + version.version
else:
version = 'dev'
if '://' in version:
if version.endswith('index.html'):
baseurl = version[:-10]
elif version.endswith('/'):
baseurl = version
else:
baseurl = version + '/'
elif version == 'dev' or version == 'latest':
baseurl = 'http://devdocs.astropy.org/'
else:
baseurl = 'http://docs.astropy.org/en/{vers}/'.format(vers=version)
if timeout is None:
uf = urllib.request.urlopen(baseurl + 'objects.inv')
else:
uf = urllib.request.urlopen(baseurl + 'objects.inv', timeout=timeout)
try:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b'\n', oldidx + 1)
headerlines.append(oiread[(oldidx+1):idx].decode('utf-8'))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if 'The remainder of this file is compressed using zlib' not in compr:
            raise ValueError('The file downloaded from {0} does not seem to be '
'the usual Sphinx objects.inv format. Maybe it '
'has changed?'.format(baseurl + 'objects.inv'))
compressed = oiread[(idx+1):]
finally:
uf.close()
decompressed = decompress(compressed).decode('utf-8')
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith('$'):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError('Could not find the docs for the object {obj}'.format(obj=obj))
elif openinbrowser:
webbrowser.open(resurl)
return resurl
def signal_number_to_name(signum):
"""
Given an OS signal number, returns a signal name. If the signal
number is unknown, returns ``'UNKNOWN'``.
"""
# Since these numbers and names are platform specific, we use the
# builtin signal module and build a reverse mapping.
signal_to_name_map = dict((k, v) for v, k in signal.__dict__.items()
if v.startswith('SIG'))
return signal_to_name_map.get(signum, 'UNKNOWN')
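# Illustrative (POSIX): SIGINT is signal number 2.
def _example_signal_name():
    assert signal_number_to_name(signal.SIGINT) == 'SIGINT'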
if sys.platform == 'win32':
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b'.')
else:
is_dotted = name.startswith('.')
return is_dotted or _has_hidden_attribute(filepath)
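# Illustrative: dotfiles count as hidden on every platform.
def _example_is_path_hidden():
    assert is_path_hidden('.bashrc')
    assert not is_path_hidden('notes.txt')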
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror,
followlinks=followlinks):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
class JsonCustomEncoder(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj):
from .. import units as u
import numpy as np
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
            if obj == u.dimensionless_unscaled:
                # Return the sentinel string directly; falling through to
                # JSONEncoder.default would unconditionally raise TypeError.
                return 'dimensionless_unit'
            else:
                return obj.to_string()
return json.JSONEncoder.default(self, obj)
def strip_accents(s):
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
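    Examples
    --------
    ::

        >>> strip_accents('ångström')
        'angstrom'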
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None):
"""
    When a string isn't found in a set of candidates, it is nice
    to provide a list of alternatives in the exception. This
    convenience function helps to format that part of the exception.
Parameters
----------
s : str
candidates : sequence of str or dict of str keys
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to word are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a sequence of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
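    Examples
    --------
    A short illustration::

        >>> did_you_mean('splot', ['plot', 'scatter'])
        'Did you mean plot?'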
"""
if isinstance(s, str):
s = strip_accents(s)
s_lower = s.lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = {}
for candidate in candidates:
candidate_lower = candidate.lower()
candidates_lower.setdefault(candidate_lower, [])
candidates_lower[candidate_lower].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
if s_lower.endswith('s') and s_lower[:-1] in candidates_lower:
matches = [s_lower[:-1]]
else:
matches = difflib.get_close_matches(
s_lower, candidates_lower, n=n, cutoff=cutoff)
if len(matches):
capitalized_matches = set()
for match in matches:
capitalized_matches.update(candidates_lower[match])
matches = capitalized_matches
if fix is not None:
mapped_matches = []
for match in matches:
mapped_matches.extend(fix(match))
matches = mapped_matches
matches = list(set(matches))
matches = sorted(matches)
if len(matches) == 1:
matches = matches[0]
else:
matches = (', '.join(matches[:-1]) + ' or ' +
matches[-1])
return 'Did you mean {0}?'.format(matches)
return ''
class InheritDocstrings(type):
"""
This metaclass makes methods of a class automatically have their
docstrings filled in from the methods they override in the base
class.
If the class uses multiple inheritance, the docstring will be
chosen from the first class in the bases list, in the same way as
methods are normally resolved in Python. If this results in
selecting the wrong docstring, the docstring will need to be
explicitly included on the method.
For example::
>>> from astropy.utils.misc import InheritDocstrings
>>> class A(metaclass=InheritDocstrings):
... def wiggle(self):
... "Wiggle the thingamajig"
... pass
>>> class B(A):
... def wiggle(self):
... pass
>>> B.wiggle.__doc__
        'Wiggle the thingamajig'
"""
def __init__(cls, name, bases, dct):
def is_public_member(key):
return (
(key.startswith('__') and key.endswith('__')
and len(key) > 4) or
not key.startswith('_'))
for key, val in dct.items():
if ((inspect.isfunction(val) or inspect.isdatadescriptor(val)) and
is_public_member(key) and
val.__doc__ is None):
for base in cls.__mro__[1:]:
super_method = getattr(base, key, None)
if super_method is not None:
val.__doc__ = super_method.__doc__
break
super().__init__(name, bases, dct)
class OrderedDescriptor(metaclass=abc.ABCMeta):
"""
Base class for descriptors whose order in the class body should be
preserved. Intended for use in concert with the
`OrderedDescriptorContainer` metaclass.
Subclasses of `OrderedDescriptor` must define a value for a class attribute
called ``_class_attribute_``. This is the name of a class attribute on the
*container* class for these descriptors, which will be set to an
`~collections.OrderedDict` at class creation time. This
`~collections.OrderedDict` will contain a mapping of all class attributes
that were assigned instances of the `OrderedDescriptor` subclass, to the
instances themselves. See the documentation for
`OrderedDescriptorContainer` for a concrete example.
Optionally, subclasses of `OrderedDescriptor` may define a value for a
class attribute called ``_name_attribute_``. This should be the name of
an attribute on instances of the subclass. When specified, during
creation of a class containing these descriptors, the name attribute on
each instance will be set to the name of the class attribute it was
assigned to on the class.
.. note::
Although this class is intended for use with *descriptors* (i.e.
classes that define any of the ``__get__``, ``__set__``, or
``__delete__`` magic methods), this base class is not itself a
descriptor, and technically this could be used for classes that are
not descriptors too. However, use with descriptors is the original
intended purpose.
"""
# This id increments for each OrderedDescriptor instance created, so they
# are always ordered in the order they were created. Class bodies are
# guaranteed to be executed from top to bottom. Not sure if this is
# thread-safe though.
_nextid = 1
@property
@abc.abstractmethod
def _class_attribute_(self):
"""
Subclasses should define this attribute to the name of an attribute on
classes containing this subclass. That attribute will contain the mapping
of all instances of that `OrderedDescriptor` subclass defined in the class
body. If the same descriptor needs to be used with different classes,
each with different names of this attribute, multiple subclasses will be
needed.
"""
_name_attribute_ = None
"""
Subclasses may optionally define this attribute to specify the name of an
attribute on instances of the class that should be filled with the
instance's attribute name at class creation time.
"""
def __init__(self, *args, **kwargs):
# The _nextid attribute is shared across all subclasses so that
# different subclasses of OrderedDescriptors can be sorted correctly
# between themselves
self.__order = OrderedDescriptor._nextid
OrderedDescriptor._nextid += 1
super().__init__()
def __lt__(self, other):
"""
Defined for convenient sorting of `OrderedDescriptor` instances, which
are defined to sort in their creation order.
"""
if (isinstance(self, OrderedDescriptor) and
isinstance(other, OrderedDescriptor)):
try:
return self.__order < other.__order
except AttributeError:
raise RuntimeError(
'Could not determine ordering for {0} and {1}; at least '
'one of them is not calling super().__init__ in its '
'__init__.'.format(self, other))
else:
return NotImplemented
class OrderedDescriptorContainer(type):
"""
Classes should use this metaclass if they wish to use `OrderedDescriptor`
attributes, which are class attributes that "remember" the order in which
they were defined in the class body.
Every subclass of `OrderedDescriptor` has an attribute called
``_class_attribute_``. For example, if we have
.. code:: python
class ExampleDecorator(OrderedDescriptor):
_class_attribute_ = '_examples_'
Then when a class with the `OrderedDescriptorContainer` metaclass is
created, it will automatically be assigned a class attribute ``_examples_``
referencing an `~collections.OrderedDict` containing all instances of
``ExampleDecorator`` defined in the class body, mapped to by the names of
the attributes they were assigned to.
When subclassing a class with this metaclass, the descriptor dict (i.e.
``_examples_`` in the above example) will *not* contain descriptors
inherited from the base class. That is, this only works by default with
    descriptors explicitly defined in the class body. However, the subclass
    *may* define an attribute ``_inherit_descriptors_`` which lists
`OrderedDescriptor` classes that *should* be added from base classes.
See the examples section below for an example of this.
Examples
--------
>>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer
>>> class TypedAttribute(OrderedDescriptor):
... \"\"\"
... Attributes that may only be assigned objects of a specific type,
... or subclasses thereof. For some reason we care about their order.
... \"\"\"
...
... _class_attribute_ = 'typed_attributes'
... _name_attribute_ = 'name'
... # A default name so that instances not attached to a class can
... # still be repr'd; useful for debugging
... name = '<unbound>'
...
... def __init__(self, type):
... # Make sure not to forget to call the super __init__
... super().__init__()
... self.type = type
...
... def __get__(self, obj, objtype=None):
... if obj is None:
... return self
... if self.name in obj.__dict__:
... return obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __set__(self, obj, value):
... if not isinstance(value, self.type):
... raise ValueError('{0}.{1} must be of type {2!r}'.format(
... obj.__class__.__name__, self.name, self.type))
... obj.__dict__[self.name] = value
...
... def __delete__(self, obj):
... if self.name in obj.__dict__:
... del obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __repr__(self):
... if isinstance(self.type, tuple) and len(self.type) > 1:
... typestr = '({0})'.format(
... ', '.join(t.__name__ for t in self.type))
... else:
... typestr = self.type.__name__
... return '<{0}(name={1}, type={2})>'.format(
... self.__class__.__name__, self.name, typestr)
...
Now let's create an example class that uses this ``TypedAttribute``::
>>> class Point2D(metaclass=OrderedDescriptorContainer):
... x = TypedAttribute((float, int))
... y = TypedAttribute((float, int))
...
... def __init__(self, x, y):
... self.x, self.y = x, y
...
>>> p1 = Point2D(1.0, 2.0)
>>> p1.x
1.0
>>> p1.y
2.0
>>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: Point2D.x must be of type (float, int)
We see that ``TypedAttribute`` works more or less as advertised, but
there's nothing special about that. Let's see what
`OrderedDescriptorContainer` did for us::
>>> Point2D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>)])
If we create a subclass, it does *not* by default add inherited descriptors
to ``typed_attributes``::
>>> class Point3D(Point2D):
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)])
However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then
it will do so::
>>> class Point3D(Point2D):
... _inherit_descriptors_ = (TypedAttribute,)
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>),
('z', <TypedAttribute(name=z, type=(float, int))>)])
.. note::
Hopefully it is clear from these examples that this construction
also allows a class of type `OrderedDescriptorContainer` to use
multiple different `OrderedDescriptor` classes simultaneously.
"""
_inherit_descriptors_ = ()
def __init__(cls, cls_name, bases, members):
descriptors = defaultdict(list)
seen = set()
inherit_descriptors = ()
descr_bases = {}
for mro_cls in cls.__mro__:
for name, obj in mro_cls.__dict__.items():
if name in seen:
# Checks if we've already seen an attribute of the given
# name (if so it will override anything of the same name in
# any base class)
continue
seen.add(name)
if (not isinstance(obj, OrderedDescriptor) or
(inherit_descriptors and
not isinstance(obj, inherit_descriptors))):
# The second condition applies when checking any
# subclasses, to see if we can inherit any descriptors of
# the given type from subclasses (by default inheritance is
# disabled unless the class has _inherit_descriptors_
# defined)
continue
if obj._name_attribute_ is not None:
setattr(obj, obj._name_attribute_, name)
# Don't just use the descriptor's class directly; instead go
# through its MRO and find the class on which _class_attribute_
# is defined directly. This way subclasses of some
# OrderedDescriptor *may* override _class_attribute_ and have
# its own _class_attribute_, but by default all subclasses of
# some OrderedDescriptor are still grouped together
# TODO: It might be worth clarifying this in the docs
if obj.__class__ not in descr_bases:
for obj_cls_base in obj.__class__.__mro__:
if '_class_attribute_' in obj_cls_base.__dict__:
descr_bases[obj.__class__] = obj_cls_base
descriptors[obj_cls_base].append((obj, name))
break
else:
# Make sure to put obj first for sorting purposes
obj_cls_base = descr_bases[obj.__class__]
descriptors[obj_cls_base].append((obj, name))
if not getattr(mro_cls, '_inherit_descriptors_', False):
# If _inherit_descriptors_ is undefined then we don't inherit
# any OrderedDescriptors from any of the base classes, and
# there's no reason to continue through the MRO
break
else:
inherit_descriptors = mro_cls._inherit_descriptors_
for descriptor_cls, instances in descriptors.items():
instances.sort()
instances = OrderedDict((key, value) for value, key in instances)
setattr(cls, descriptor_cls._class_attribute_, instances)
super().__init__(cls_name, bases, members)
LOCALE_LOCK = threading.Lock()
@contextmanager
def set_locale(name):
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
    Note that one cannot nest multiple set_locale() context managers:
    they share a non-reentrant threading lock, so nesting would deadlock.
    This code was adapted from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
    Parameters
    ----------
name : str
Locale name, e.g. "C" or "fr_FR".
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
class ShapedLikeNDArray(metaclass=abc.ABCMeta):
"""Mixin class to provide shape-changing methods.
The class proper is assumed to have some underlying data, which are arrays
or array-like structures. It must define a ``shape`` property, which gives
the shape of those data, as well as an ``_apply`` method that creates a new
instance in which a `~numpy.ndarray` method has been applied to those.
Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
define a setter for the ``shape`` property, which, like the
    `~numpy.ndarray.shape` property, allows in-place reshaping of the internal
    data (and, unlike the ``reshape`` method, raises an exception if this is
    not possible).
This class also defines default implementations for ``ndim`` and ``size``
properties, calculating those from the ``shape``. These can be overridden
by subclasses if there are faster ways to obtain those numbers.
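    Examples
    --------
    A minimal concrete subclass, shown as an illustrative sketch
    (``ArrayHolder`` is hypothetical, not part of astropy)::

        >>> import numpy as np
        >>> class ArrayHolder(ShapedLikeNDArray):
        ...     def __init__(self, data):
        ...         self._data = np.asarray(data)
        ...     @property
        ...     def shape(self):
        ...         return self._data.shape
        ...     def _apply(self, method, *args, **kwargs):
        ...         return self.__class__(
        ...             getattr(self._data, method)(*args, **kwargs))
        >>> ArrayHolder(np.arange(6)).reshape(2, 3).shape
        (2, 3)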
"""
# Note to developers: if new methods are added here, be sure to check that
# they work properly with the classes that use this, such as Time and
# BaseRepresentation, i.e., look at their ``_apply`` methods and add
# relevant tests. This is particularly important for methods that imply
# copies rather than views of data (see the special-case treatment of
# 'flatten' in Time).
@property
@abc.abstractmethod
def shape(self):
"""The shape of the instance and underlying arrays."""
@abc.abstractmethod
    def _apply(self, method, *args, **kwargs):
"""Create a new instance, with ``method`` applied to underlying data.
The method is any of the shape-changing methods for `~numpy.ndarray`
(``reshape``, ``swapaxes``, etc.), as well as those picking particular
elements (``__getitem__``, ``take``, etc.). It will be applied to the
underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`),
with the results used to create a new instance.
Parameters
----------
method : str
Method to be applied to the instance's internal data arrays.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
@property
def ndim(self):
"""The number of dimensions of the instance and underlying arrays."""
return len(self.shape)
@property
def size(self):
"""The size of the object, as calculated from its shape."""
size = 1
for sh in self.shape:
size *= sh
return size
@property
def isscalar(self):
return self.shape == ()
def __len__(self):
if self.isscalar:
raise TypeError("Scalar {0!r} object has no len()"
.format(self.__class__.__name__))
return self.shape[0]
def __bool__(self):
"""Any instance should evaluate to True, except when it is empty."""
return self.size > 0
def __getitem__(self, item):
try:
return self._apply('__getitem__', item)
except IndexError:
if self.isscalar:
raise TypeError('scalar {0!r} object is not subscriptable.'
.format(self.__class__.__name__))
else:
raise
def __iter__(self):
if self.isscalar:
raise TypeError('scalar {0!r} object is not iterable.'
.format(self.__class__.__name__))
# We cannot just write a generator here, since then the above error
# would only be raised once we try to use the iterator, rather than
# upon its definition using iter(self).
def self_iter():
for idx in range(len(self)):
yield self[idx]
return self_iter()
def copy(self, *args, **kwargs):
"""Return an instance containing copies of the internal data.
Parameters are as for :meth:`~numpy.ndarray.copy`.
"""
return self._apply('copy', *args, **kwargs)
def reshape(self, *args, **kwargs):
"""Returns an instance containing the same data with a new shape.
Parameters are as for :meth:`~numpy.ndarray.reshape`. Note that it is
not always possible to change the shape of an array without copying the
data (see :func:`~numpy.reshape` documentation). If you want an error
        to be raised if the data is copied, you should assign the new shape to
the shape attribute (note: this may not be implemented for all classes
using ``ShapedLikeNDArray``).
"""
return self._apply('reshape', *args, **kwargs)
def ravel(self, *args, **kwargs):
"""Return an instance with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.ravel`. Note that it is
not always possible to unravel an array without copying the data.
        If you want an error to be raised if the data is copied, you
        should assign shape ``(-1,)`` to the shape attribute.
"""
return self._apply('ravel', *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Return a copy with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.flatten`.
"""
return self._apply('flatten', *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Return an instance with the data transposed.
Parameters are as for :meth:`~numpy.ndarray.transpose`. All internal
data are views of the data of the original.
"""
return self._apply('transpose', *args, **kwargs)
@property
def T(self):
"""Return an instance with the data transposed.
Parameters are as for :attr:`~numpy.ndarray.T`. All internal
data are views of the data of the original.
"""
if self.ndim < 2:
return self
else:
return self.transpose()
def swapaxes(self, *args, **kwargs):
"""Return an instance with the given axes interchanged.
Parameters are as for :meth:`~numpy.ndarray.swapaxes`:
``axis1, axis2``. All internal data are views of the data of the
original.
"""
return self._apply('swapaxes', *args, **kwargs)
def diagonal(self, *args, **kwargs):
"""Return an instance with the specified diagonals.
Parameters are as for :meth:`~numpy.ndarray.diagonal`. All internal
data are views of the data of the original.
"""
return self._apply('diagonal', *args, **kwargs)
def squeeze(self, *args, **kwargs):
"""Return an instance with single-dimensional shape entries removed
Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal
data are views of the data of the original.
"""
return self._apply('squeeze', *args, **kwargs)
def take(self, indices, axis=None, mode='raise'):
"""Return a new instance formed from the elements at the given indices.
Parameters are as for :meth:`~numpy.ndarray.take`, except that,
obviously, no output array can be given.
"""
return self._apply('take', indices, axis=axis, mode=mode)
class IncompatibleShapeError(ValueError):
def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx):
super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx)
def check_broadcast(*shapes):
"""
Determines whether two or more Numpy arrays can be broadcast with each
other based on their shape tuple alone.
Parameters
----------
*shapes : tuple
All shapes to include in the comparison. If only one shape is given it
        is passed through unmodified. If no shapes are given, an empty
        `tuple` is returned.
Returns
-------
broadcast : `tuple`
If all shapes are mutually broadcastable, returns a tuple of the full
broadcast shape.
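    Examples
    --------
    A short illustration::

        >>> check_broadcast((2, 3), (3,), (1, 3))
        (2, 3)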
"""
if len(shapes) == 0:
return ()
elif len(shapes) == 1:
return shapes[0]
reversed_shapes = (reversed(shape) for shape in shapes)
full_shape = []
for dims in zip_longest(*reversed_shapes, fillvalue=1):
max_dim = 1
max_dim_idx = None
for idx, dim in enumerate(dims):
if dim == 1:
continue
if max_dim == 1:
# The first dimension of size greater than 1
max_dim = dim
max_dim_idx = idx
elif dim != max_dim:
raise IncompatibleShapeError(
shapes[max_dim_idx], max_dim_idx, shapes[idx], idx)
full_shape.append(max_dim)
return tuple(full_shape[::-1])
def dtype_bytes_or_chars(dtype):
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
        Bytes (for numeric types) or characters (for string types)
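    Examples
    --------
    ::

        >>> import numpy as np
        >>> dtype_bytes_or_chars(np.dtype(np.float64))
        8
        >>> dtype_bytes_or_chars(np.dtype('U5'))
        5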
"""
match = re.search(r'(\d+)$', dtype.str)
out = int(match.group(1)) if match else None
return out
|
c9748a9148b2f86e87abb7ea5d90699c26e7584d239ebd212ddf09ae6825d2e5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from .. import units as u
from .wcs import WCS, WCSSUB_CELESTIAL
__doctest_skip__ = ['wcs_to_celestial_frame', 'celestial_frame_to_wcs']
__all__ = ['add_stokes_axis_to_wcs', 'celestial_frame_to_wcs',
'wcs_to_celestial_frame', 'proj_plane_pixel_scales',
'proj_plane_pixel_area', 'is_proj_plane_distorted',
'non_celestial_pixel_scales', 'skycoord_to_pixel',
'pixel_to_skycoord', 'custom_wcs_to_frame_mappings',
'custom_frame_to_wcs_mappings']
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
        To add at the end, use ``add_before_ind = wcs.wcs.naxis``.
The beginning is at position 0.
Returns
-------
A new `~astropy.wcs.WCS` instance with an additional axis
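    Examples
    --------
    A short sketch appending a Stokes axis to a two-dimensional WCS::

        >>> from astropy.wcs import WCS
        >>> wcs = WCS(naxis=2)
        >>> newwcs = add_stokes_axis_to_wcs(wcs, 2)
        >>> newwcs.wcs.naxis
        3
        >>> newwcs.wcs.ctype[2]
        'STOKES'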
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = 'STOKES'
newwcs.wcs.cname[add_before_ind] = 'STOKES'
return newwcs
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from ..coordinates import FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from ..time import Time
# Keep only the celestial part of the axes
wcs = wcs.sub([WCSSUB_CELESTIAL])
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[0][:4]
ycoord = wcs.wcs.ctype[1][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-':
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == 'FK4':
if equinox is not None:
equinox = Time(equinox, format='byear')
frame = FK4(equinox=equinox)
elif radesys == 'FK4-NO-E':
if equinox is not None:
equinox = Time(equinox, format='byear')
frame = FK4NoETerms(equinox=equinox)
elif radesys == 'FK5':
if equinox is not None:
equinox = Time(equinox, format='jyear')
frame = FK5(equinox=equinox)
elif radesys == 'ICRS':
frame = ICRS()
else:
if xcoord == 'GLON' and ycoord == 'GLAT':
frame = Galactic()
elif xcoord == 'TLON' and ycoord == 'TLAT':
frame = ITRS(obstime=wcs.wcs.dateobs or None)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection='TAN'):
# Import astropy.coordinates here to avoid circular imports
from ..coordinates import BaseRADecFrame, FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = 'RA--'
ycoord = 'DEC-'
if isinstance(frame, ICRS):
wcs.wcs.radesys = 'ICRS'
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = 'FK4-NO-E'
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = 'FK4'
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = 'FK5'
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = 'GLON'
ycoord = 'GLAT'
elif isinstance(frame, ITRS):
xcoord = 'TLON'
ycoord = 'TLAT'
wcs.wcs.radesys = 'ITRS'
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + '-' + projection, ycoord + '-' + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, '__call__'):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, '__call__'):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
        subclass that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError("Could not determine celestial frame corresponding to "
"the specified WCS object")
def celestial_frame_to_wcs(frame, projection='TAN'):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
        subclass for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError("Could not determine WCS corresponding to the specified "
"coordinate frame.")
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : `~numpy.ndarray`
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
        of the `~astropy.wcs.Wcsprm.cunit` property of the input
        `~astropy.wcs.WCS` object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
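    Examples
    --------
    A minimal sketch for a diagonal pixel scale matrix::

        >>> from astropy.wcs import WCS
        >>> wcs = WCS(naxis=2)
        >>> wcs.wcs.cdelt = [-1.0, 2.0]
        >>> proj_plane_pixel_scales(wcs).tolist()
        [1.0, 2.0]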
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
        obtained by inquiring the value of the `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
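    Examples
    --------
    A minimal sketch; the pixel area is the absolute determinant of the
    pixel scale matrix::

        >>> from astropy.wcs import WCS
        >>> wcs = WCS(naxis=2)
        >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        >>> wcs.wcs.cdelt = [-1.0, 2.0]
        >>> float(proj_plane_pixel_area(wcs))
        2.0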
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
    It will return `True` if the transformation from image (detector)
    coordinates to the focal plane coordinates is non-orthogonal or if the WCS
non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
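    Examples
    --------
    A short sketch: an identity ``PC`` matrix is orthogonal, a sheared one
    is not::

        >>> from astropy.wcs import WCS
        >>> wcs = WCS(naxis=2)
        >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        >>> is_proj_plane_distorted(wcs)
        False
        >>> wcs.wcs.pc = [[1.0, 0.5], [0.0, 1.0]]
        >>> is_proj_plane_distorted(wcs)
        True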
"""
cwcs = wcs.celestial
return (not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or
_has_distortion(cwcs))
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if (pixarea == 0.0):
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return (cd_unitary_err < maxerr)
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
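    Examples
    --------
    ::

        >>> from astropy.wcs import WCS
        >>> inwcs = WCS(naxis=2)
        >>> inwcs.wcs.cdelt = [0.1, 0.2]
        >>> non_celestial_pixel_scales(inwcs).value.tolist()
        [0.1, 0.2]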
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1-np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd))*u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if contains any SIP or image distortion components.
"""
return any(getattr(wcs, dist_attr) is not None
for dist_attr in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip'])
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode='all'):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
        only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
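    Examples
    --------
    A minimal sketch at the reference pixel of a simple TAN WCS (any fully
    specified celestial WCS would do)::

        >>> from astropy.wcs import WCS
        >>> from astropy.coordinates import SkyCoord
        >>> wcs = WCS(naxis=2)
        >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        >>> wcs.wcs.crval = [10.0, 20.0]
        >>> wcs.wcs.crpix = [1.0, 1.0]
        >>> wcs.wcs.cunit = ['deg', 'deg']
        >>> xp, yp = skycoord_to_pixel(SkyCoord(10.0, 20.0, unit='deg'), wcs)
        >>> bool(np.allclose([xp, yp], [0.0, 0.0], atol=1e-3))
        True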
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_CELESTIAL])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == 'all':
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == 'wcs':
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode='all', cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
        Whether to interpret the input pixel coordinates as 0-based or 1-based.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
        only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : Whatever ``cls`` is (a subclass of `~astropy.coordinates.SkyCoord`)
The celestial coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
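    Examples
    --------
    The inverse of the sketch shown for `skycoord_to_pixel`::

        >>> from astropy.wcs import WCS
        >>> wcs = WCS(naxis=2)
        >>> wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        >>> wcs.wcs.crval = [10.0, 20.0]
        >>> wcs.wcs.crpix = [1.0, 1.0]
        >>> wcs.wcs.cunit = ['deg', 'deg']
        >>> coord = pixel_to_skycoord(0.0, 0.0, wcs)
        >>> bool(np.allclose([coord.ra.deg, coord.dec.deg],
        ...                  [10.0, 20.0], atol=1e-3))
        True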
"""
# Import astropy.coordinates here to avoid circular imports
from ..coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_CELESTIAL])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == 'all':
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == 'wcs':
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
|
519b2904ed129ea8ba591a3033ce1d25528e6d13445612b19e88ec2dfb50f368 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from .core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
`True` if each member is either zero or not finite, `False` otherwise
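    Examples
    --------
    ::

        >>> bool(can_have_arbitrary_unit(np.array([0., np.inf])))
        True
        >>> bool(can_have_arbitrary_unit(1.))
        False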
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
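#
# For instance, a sketch of what the sqrt helper below produces for an input
# in m**2 (kept as a comment so importing this module stays side-effect free):
#
#     >>> from astropy import units as u
#     >>> helper_sqrt(np.sqrt, u.m ** 2)
#     ([None], Unit("m"))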
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_sqrt(f, unit):
return ([None], unit ** Fraction(1, 2) if unit is not None
else dimensionless_unscaled)
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** Fraction(1, 3) if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from .si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from .si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
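#
# For instance (again as a non-executed comment):
#
#     >>> from astropy import units as u
#     >>> helper_multiplication(np.multiply, u.m, u.s)
#     ([None, None], Unit("m s"))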
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few microseconds; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from .si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_degree_to_dimensionless(f, unit):
from .si import degree
try:
return [get_converter(unit, degree)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
from .si import degree, arcmin, arcsec, radian
try:
return [get_converter(unit1, degree),
get_converter(unit2, arcmin),
get_converter(unit3, arcsec)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UFUNC_HELPERS = {}
UNSUPPORTED_UFUNCS = {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
# isnat was introduced in numpy 1.14, gcd+lcm in 1.15
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
# SINGLE ARGUMENT UFUNCS
# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# positive was added in numpy 1.13
if isinstance(getattr(np, 'positive', None), np.ufunc):
UFUNC_HELPERS[np.positive] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
# heaviside was only added in numpy 1.13
if isinstance(getattr(np, 'heaviside', None), np.ufunc):
UFUNC_HELPERS[np.heaviside] = helper_heaviside
# float_power was added in numpy 1.12
if isinstance(getattr(np, 'float_power', None), np.ufunc):
UFUNC_HELPERS[np.float_power] = helper_power
# divmod was only added in numpy 1.13
if isinstance(getattr(np, 'divmod', None), np.ufunc):
UFUNC_HELPERS[np.divmod] = helper_divmod
# UFUNCS FROM SCIPY.SPECIAL
# available ufuncs in this module are at
# https://docs.scipy.org/doc/scipy/reference/special.html
try:
import scipy
import scipy.special as sps
except ImportError:
pass
else:
from ..utils import minversion
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_sps_ufuncs = [
sps.erf, sps.gamma, sps.gammasgn,
sps.psi, sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz,
sps.dawsn, sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2,
sps.exp10, sps.j0, sps.j1, sps.y0, sps.y1, sps.i0, sps.i0e, sps.i1,
sps.i1e, sps.k0, sps.k0e, sps.k1, sps.k1e, sps.itj0y0,
sps.it2j0y0, sps.iti0k0, sps.it2i0k0]
# TODO: Revert https://github.com/astropy/astropy/pull/7219 when astropy
# requires scipy>=0.18.
# See https://github.com/astropy/astropy/issues/7159
if minversion(scipy, "0.18"):
dimensionless_to_dimensionless_sps_ufuncs.append(sps.loggamma)
for ufunc in dimensionless_to_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require input in degrees and give dimensionless output
degree_to_dimensionless_sps_ufuncs = (
sps.cosdg, sps.sindg, sps.tandg, sps.cotdg)
for ufunc in degree_to_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_dimensionless
# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: sps.jv and sps.jn are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
for ufunc in two_arg_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[sps.cbrt] = helper_cbrt
UFUNC_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : Quantity or other ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined above) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
try:
ufunc_helper = UFUNC_HELPERS[function]
except KeyError:
if function in UNSUPPORTED_UFUNCS:
raise TypeError("Cannot use function '{0}' with quantities"
.format(function.__name__))
else:
raise TypeError("Unknown ufunc {0}. Please raise issue on "
"https://github.com/astropy/astropy"
.format(function.__name__))
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for two-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
maybe_arbitrary_arg = args[converters.index(False)]
try:
if can_have_arbitrary_unit(maybe_arbitrary_arg):
converters = [None, None]
else:
raise UnitsError("Can only apply '{0}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {0}: "
"'{1}' and '{2}'"
.format(function.__name__,
args[0].__class__.__name__,
args[1].__class__.__name__))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError("{0} only supported for binary functions"
.format(method))
raise TypeError("Unexpected ufunc method {0}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
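# A usage sketch (illustrative only): for ``np.add`` on km and m, the second
# input gets a conversion function into the first input's unit and the result
# unit is km; for ``np.sin`` on an angle, the single converter rescales to
# radian and the result unit is dimensionless.
#
#     >>> import numpy as np
#     >>> from astropy import units as u
#     >>> converters, result_unit = converters_and_unit(
#     ...     np.add, '__call__', 1. * u.km, 1. * u.m)   # result_unit: km
#     >>> converters, result_unit = converters_and_unit(
#     ...     np.sin, '__call__', 90. * u.deg)           # result: dimensionless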
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{0} in {1} "
"instance".format(
(" from {0} function".format(function.__name__)
if function is not None else ""),
type(output)))
if output.__quantity_subclass__(unit)[0] is not type(output):
raise UnitTypeError(
"Cannot store output with unit '{0}'{1} "
"in {2} instance. Use {3} instance instead."
.format(unit, (" from {0} function".format(function.__name__)
if function is not None else ""), type(output),
output.__quantity_subclass__(unit)[0]))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
output = output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{0}in a non-Quantity instance."
.format("" if function is None else
"resulting from {0} function "
.format(function.__name__)))
# check we can handle the dtype (e.g., that we are not int
# when float is required).
if not np.can_cast(np.result_type(*inputs), output.dtype,
casting='same_kind'):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={0}".format(output.dtype))
return output
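# A usage sketch (illustrative only; ``out`` is a hypothetical pre-allocated
# array): a plain ndarray can only hold unitless results, while a Quantity
# output with a consistent unit is returned as a bare ndarray view.
#
#     >>> out = np.empty(3)
#     >>> check_output(out, None, (np.ones(3),))         # OK, returns ``out``
#     >>> check_output(out, u.km, (np.ones(3) * u.km,))  # raises UnitTypeError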
|
6b6e47f961662e92e226a33b6a7a2bfa15ee0aecb62542d7d41b12f3ff55210e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to this file.
# angle_lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
_tabversion = '3.10'
_lextokens = set(('COLON', 'DEGREE', 'HOUR', 'MINUTE', 'SECOND', 'SIGN', 'SIMPLE_UNIT', 'UFLOAT', 'UINT'))
_lexreflags = 64
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_UFLOAT>((\\d+\\.\\d*)|(\\.\\d+))([eE][+-−]?\\d+)?)|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+−-])|(?P<t_SIMPLE_UNIT>(?:Earcmin)|(?:Earcsec)|(?:Edeg)|(?:Erad)|(?:Garcmin)|(?:Garcsec)|(?:Gdeg)|(?:Grad)|(?:Marcmin)|(?:Marcsec)|(?:Mdeg)|(?:Mrad)|(?:Parcmin)|(?:Parcsec)|(?:Pdeg)|(?:Prad)|(?:Tarcmin)|(?:Tarcsec)|(?:Tdeg)|(?:Trad)|(?:Yarcmin)|(?:Yarcsec)|(?:Ydeg)|(?:Yrad)|(?:Zarcmin)|(?:Zarcsec)|(?:Zdeg)|(?:Zrad)|(?:aarcmin)|(?:aarcsec)|(?:adeg)|(?:arad)|(?:arcmin)|(?:arcminute)|(?:arcsec)|(?:arcsecond)|(?:attoarcminute)|(?:attoarcsecond)|(?:attodegree)|(?:attoradian)|(?:carcmin)|(?:carcsec)|(?:cdeg)|(?:centiarcminute)|(?:centiarcsecond)|(?:centidegree)|(?:centiradian)|(?:crad)|(?:cy)|(?:cycle)|(?:daarcmin)|(?:daarcsec)|(?:dadeg)|(?:darad)|(?:darcmin)|(?:darcsec)|(?:ddeg)|(?:decaarcminute)|(?:decaarcsecond)|(?:decadegree)|(?:decaradian)|(?:deciarcminute)|(?:deciarcsecond)|(?:decidegree)|(?:deciradian)|(?:dekaarcminute)|(?:dekaarcsecond)|(?:dekadegree)|(?:dekaradian)|(?:drad)|(?:exaarcminute)|(?:exaarcsecond)|(?:exadegree)|(?:exaradian)|(?:farcmin)|(?:farcsec)|(?:fdeg)|(?:femtoarcminute)|(?:femtoarcsecond)|(?:femtodegree)|(?:femtoradian)|(?:frad)|(?:gigaarcminute)|(?:gigaarcsecond)|(?:gigadegree)|(?:gigaradian)|(?:harcmin)|(?:harcsec)|(?:hdeg)|(?:hectoarcminute)|(?:hectoarcsecond)|(?:hectodegree)|(?:hectoradian)|(?:hrad)|(?:karcmin)|(?:karcsec)|(?:kdeg)|(?:kiloarcminute)|(?:kiloarcsecond)|(?:kilodegree)|(?:kiloradian)|(?:krad)|(?:marcmin)|(?:marcsec)|(?:mas)|(?:mdeg)|(?:megaarcminute)|(?:megaarcsecond)|(?:megadegree)|(?:megaradian)|(?:microarcminute)|(?:microarcsecond)|(?:microdegree)|(?:microradian)|(?:milliarcminute)|(?:milliarcsecond)|(?:millidegree)|(?:milliradian)|(?:mrad)|(?:nanoarcminute)|(?:nanoarcsecond)|(?:nanodegree)|(?:nanoradian)|(?:narcmin)|(?:narcsec)|(?:ndeg)|(?:nrad)|(?:parcmin)|(?:parcsec)|(?:pdeg)|(?:petaarcminute)|(?:petaarcsecond)|(?:petadegree)|(?:petaradian)|(?:picoarcminute)|(?:picoarcsecond)|(?:picodegree)|(?:picoradian)|(?:prad)|(?:rad)|(?:radian)|(?:teraarcminute)|(?:teraarcsecond)|(?:teradegree)|(?:teraradian)|(?:uarcmin)|(?:uarcsec)|(?:uas)|(?:udeg)|(?:urad)|(?:yarcmin)|(?:yarcsec)|(?:ydeg)|(?:yoctoarcminute)|(?:yoctoarcsecond)|(?:yoctodegree)|(?:yoctoradian)|(?:yottaarcminute)|(?:yottaarcsecond)|(?:yottadegree)|(?:yottaradian)|(?:yrad)|(?:zarcmin)|(?:zarcsec)|(?:zdeg)|(?:zeptoarcminute)|(?:zeptoarcsecond)|(?:zeptodegree)|(?:zeptoradian)|(?:zettaarcminute)|(?:zettaarcsecond)|(?:zettadegree)|(?:zettaradian)|(?:zrad))|(?P<t_SECOND>s(ec(ond(s)?)?)?|″|\\"|ˢ)|(?P<t_MINUTE>m(in(ute(s)?)?)?|′|\\\'|ᵐ)|(?P<t_DEGREE>d(eg(ree(s)?)?)?|°)|(?P<t_HOUR>hour(s)?|h(r)?|ʰ)|(?P<t_COLON>:)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_SIMPLE_UNIT', 'SIMPLE_UNIT'), (None, 'SECOND'), None, None, None, (None, 'MINUTE'), None, None, None, (None, 'DEGREE'), None, None, None, (None, 'HOUR'), None, None, (None, 'COLON')])]}
_lexstateignore = {'INITIAL': ' '}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
|
521519c46e12616d3f49733bdd236abad46e2b429c709b5252584ea00e005c7b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains utility functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import os
from warnings import warn
import numpy as np
from .errors import (IllegalHourWarning, IllegalHourError,
IllegalMinuteWarning, IllegalMinuteError,
IllegalSecondWarning, IllegalSecondError)
from ..utils import format_exception
from .. import units as u
TAB_HEADER = """# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to this file.
"""
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
This class should not be used directly. Use `parse_angle`
instead.
"""
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if '_parser' not in _AngleParser.__dict__:
_AngleParser._parser, _AngleParser._lexer = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(
u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
from ..extern.ply import lex, yacc
# List of token names.
tokens = (
'SIGN',
'UINT',
'UFLOAT',
'COLON',
'DEGREE',
'HOUR',
'MINUTE',
'SECOND',
'SIMPLE_UNIT'
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?'
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace('−', '-'))
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+−-]'
        # The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == '+':
t.value = 1.0
else:
t.value = -1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = '|'.join(
'(?:{0})'.format(x) for x in cls._get_simple_unit_names())
t_COLON = ':'
t_DEGREE = r'd(eg(ree(s)?)?)?|°'
t_HOUR = r'hour(s)?|h(r)?|ʰ'
t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ'
t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ'
# A string containing ignored characters (spaces)
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'angle_lextab.py'))
# Build the lexer
lexer = lex.lex(optimize=True, lextab='angle_lextab',
outputdir=os.path.dirname(__file__))
if not lexer_exists:
cls._add_tab_header('angle_lextab')
def p_angle(p):
'''
angle : hms
| dms
| arcsecond
| arcminute
| simple
'''
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
'''
ufloat : UFLOAT
| UINT
'''
p[0] = float(p[1])
def p_colon(p):
'''
colon : sign UINT COLON ufloat
| sign UINT COLON UINT COLON ufloat
'''
if len(p) == 5:
p[0] = (p[1] * p[2], p[4])
elif len(p) == 7:
p[0] = (p[1] * p[2], p[4], p[6])
def p_spaced(p):
'''
spaced : sign UINT ufloat
| sign UINT UINT ufloat
'''
if len(p) == 4:
p[0] = (p[1] * p[2], p[3])
elif len(p) == 5:
p[0] = (p[1] * p[2], p[3], p[4])
def p_generic(p):
'''
generic : colon
| spaced
| sign UFLOAT
| sign UINT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] * p[2]
def p_hms(p):
'''
hms : sign UINT HOUR
| sign UINT HOUR ufloat
| sign UINT HOUR UINT MINUTE
| sign UINT HOUR UFLOAT MINUTE
| sign UINT HOUR UINT MINUTE ufloat
| sign UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
'''
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) == 4:
p[0] = (p[1] * p[2], u.hourangle)
elif len(p) in (5, 6):
p[0] = ((p[1] * p[2], p[4]), u.hourangle)
elif len(p) in (7, 8):
p[0] = ((p[1] * p[2], p[4], p[6]), u.hourangle)
def p_dms(p):
'''
dms : sign UINT DEGREE
| sign UINT DEGREE ufloat
| sign UINT DEGREE UINT MINUTE
| sign UINT DEGREE UFLOAT MINUTE
| sign UINT DEGREE UINT MINUTE ufloat
| sign UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
'''
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) == 4:
p[0] = (p[1] * p[2], u.degree)
elif len(p) in (5, 6):
p[0] = ((p[1] * p[2], p[4]), u.degree)
elif len(p) in (7, 8):
p[0] = ((p[1] * p[2], p[4], p[6]), u.degree)
def p_simple(p):
'''
simple : generic
| generic SIMPLE_UNIT
'''
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
'''
arcsecond : generic SECOND
'''
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
'''
arcminute : generic MINUTE
'''
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'angle_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='angle_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('angle_parsetab')
return parser, lexer
@classmethod
def _add_tab_header(cls, name):
lextab_file = os.path.join(os.path.dirname(__file__), name + '.py')
with open(lextab_file, 'r') as f:
contents = f.read()
with open(lextab_file, 'w') as f:
f.write(TAB_HEADER)
f.write(contents)
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._parser.parse(
angle, lexer=self._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError("{0} in angle {1!r}".format(
str(e), angle))
else:
raise ValueError(
"Syntax error parsing angle {0!r}".format(angle))
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
    Checks that the given value is in the range [-24, 24]. If the value
    is equal to ±24, then a warning is raised.
"""
if np.any(np.abs(hrs) == 24.):
warn(IllegalHourWarning(hrs, 'Treating as 24 hr'))
elif np.any(hrs < -24.) or np.any(hrs > 24.):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
    Checks that the given value is in the range [-60, 60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(m == 60.):
warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg'))
elif np.any(m < -60.) or np.any(m > 60.):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
    Checks that the given value is in the range [-60, 60]. If the value
is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.):
warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min'))
elif sec is None:
pass
elif np.any(sec < -60.) or np.any(sec > 60.):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
return None
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
"""
return _AngleParser().parse(angle, unit, debug=debug)
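# A usage sketch (illustrative only): the parsed value is either a float or a
# (d-or-h, m, s) tuple, together with the unit found in the string (or `None`
# if only the ``unit`` argument specifies it).
#
#     >>> parse_angle('1d2m3.4s')               # -> ((1.0, 2, 3.4), u.degree)
#     >>> parse_angle('54.12412', unit=u.deg)   # -> (54.12412, None)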
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.) # (minute fraction, minute)
s = mf * 60.
return np.floor(sign * d), sign * np.floor(m), sign * s
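# Example (illustrative): the sign is carried on every field so the
# conversion round-trips through ``dms_to_degrees``.
#
#     >>> degrees_to_dms(-10.5)                   # -> (-10.0, -30.0, -0.0)
#     >>> dms_to_degrees(*degrees_to_dms(-10.5))  # -> -10.5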
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError:
raise ValueError(format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", d, m, s))
return sign * (d + m / 60. + s / 3600.)
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError:
raise ValueError(format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", h, m, s))
return sign * (h + m / 60. + s / 3600.)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
    Convert a floating-point hour value into an ``(hour, minute,
second)`` tuple.
"""
sign = np.copysign(1.0, h)
    (hf, h) = np.modf(np.abs(h))  # (hour fraction, hour)
(mf, m) = np.modf(hf * 60.0) # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
    Convert an angle in Radians to a ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',),
fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError(
"fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ('', '', '')
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], '')
elif fields == 2:
sep = sep + ('', '')
else:
sep = ('', '', '')
elif len(sep) == 2:
sep = sep + ('',)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string.")
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0 ** -4)
else:
rounding_thresh = 60.0 - (10.0 ** -precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ''
literal.append('{0:0{pad}.0f}{sep[0]}')
if fields >= 2:
literal.append('{1:02d}{sep[1]}')
if fields == 3:
if precision is None:
last_value = '{0:.4f}'.format(abs(values[2]))
last_value = last_value.rstrip('0').rstrip('.')
else:
last_value = '{0:.{precision}f}'.format(
abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == '.':
last_value = '0' + last_value
literal.append('{last_value}{sep[2]}')
literal = ''.join(literal)
return literal.format(np.copysign(values[0], sign),
int(values[1]), values[2],
sep=sep, pad=pad,
last_value=last_value)
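# A usage sketch (illustrative only):
#
#     >>> sexagesimal_to_string((1.0, 2.0, 3.0), precision=2,
#     ...                       sep=('d', 'm', 's'))   # -> '1d02m03.00s'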
def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'),
fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string((h, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3):
"""
    Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string((d, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` or float
Type depends on input; `Quantity` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
    some alternatives, but is stable at all distances, including the
poles and antipodes.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
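# A usage sketch (illustrative only): Quantity inputs give a Quantity result
# (in radians), floats give a plain float in radians.
#
#     >>> angular_separation(0 * u.deg, 0 * u.deg,
#     ...                    90 * u.deg, 0 * u.deg)   # -> pi/2 rad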
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg)
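# A usage sketch (illustrative only): a point due east of the reference
# position has a position angle of 90 degrees.
#
#     >>> position_angle(0 * u.deg, 0 * u.deg,
#     ...                1 * u.deg, 0 * u.deg).to(u.deg)   # -> 90 deg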
|
6555ed7218ac9cc1f02765bebd08b2bd859e9a8e7683a490d583a0fbe2ceb6aa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from warnings import warn
from abc import ABCMeta, abstractmethod
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
import numpy as np
from .. import units as u
from ..utils.exceptions import AstropyWarning
from .representation import REPRESENTATION_CLASSES
__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
'BaseAffineTransform', 'AffineTransform',
'StaticMatrixTransform', 'DynamicMatrixTransform',
'FunctionTransformWithFiniteDifference', 'CompositeTransform']
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, 'name', None)
if nm is not None:
dct[nm] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this `TransformGraph`.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""
Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : CoordinateTransform or similar callable
The transformation object. Typically a `CoordinateTransform` object,
although it may be some other callable that is called with the same
signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
if not callable(transform):
raise TypeError('transform must be callable')
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError("Frame(s) {0} contain invalid attribute names: {1}"
"\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
.format(list(invalid_frames), invalid_attrs))
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or `None`
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or `None`
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or `None`
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError('fromsys and tosys must both be None if either are')
if transform is None:
raise ValueError('cannot give all Nones to remove_transform')
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if b is transform:
del agraph[b]
break
else:
raise ValueError('Could not find transform {0} in the '
'graph'.format(transform))
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError('Current transform from {0} to {1} is not '
'{2}'.format(fromsys, tosys, transform))
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of classes or `None`
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : number
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float('inf')
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
                if n2 not in result:  # n2 has not been visited yet
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError('n2 not in heap - this should be impossible!')
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
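    # A usage sketch (illustrative only; ``frame_transform_graph`` is the
    # graph of the builtin astropy frames):
    #
    #     >>> from astropy.coordinates import ICRS, Galactic, frame_transform_graph
    #     >>> path, dist = frame_transform_graph.find_shortest_path(ICRS, Galactic)
    #     >>> path   # in-order list of frame classes, starting with ICRS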
def get_transform(self, fromsys, tosys):
"""
Generates and returns the `CompositeTransform` for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `CompositeTransform` or `None`
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
This function always returns a `CompositeTransform`, because
`CompositeTransform` is slightly more adaptable in the way it can be
called than other transform classes. Specifically, it takes care of
intermediate steps of transformations in a way that is consistent with
1-hop transformations.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys is not a class')
if not inspect.isclass(tosys):
raise TypeError('tosys is not a class')
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(transforms, fromsys, tosys,
register_graph=False)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
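    # A usage sketch (illustrative only; ``icrs_coord`` is a hypothetical
    # ICRS instance): the returned CompositeTransform is called like any
    # other transform.
    #
    #     >>> trans = frame_transform_graph.get_transform(ICRS, Galactic)
    #     >>> galactic_coord = trans(icrs_coord, Galactic())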
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
coordcls
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
savelayout='plain', saveformat=None, color_edges=True):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : `None` or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = dict([(v, k) for k, v in self._cached_names.items()])
for n in nodes:
if n in invclsaliases:
nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, invclsaliases[n]))
else:
nodenames.append(n.__name__ + '[ shape=oval ]')
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__] if color_edges else 'black'
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ['digraph AstropyCoordinateTransformGraph {']
lines.append('; '.join(nodenames) + ';')
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = '[ {0} {1} ]'
if priorities:
priority_part = 'label = "{0}"'.format(weights)
else:
priority_part = ''
color_part = 'color = "{0}"'.format(color)
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr))
lines.append('')
lines.append('overlap=false')
lines.append('}')
dotgraph = '\n'.join(lines)
if savefn is not None:
if savelayout == 'plain':
with open(savefn, 'w') as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append('-T' + saveformat)
                # text mode, so a str graph can be written to stdin and
                # str output read back
                proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        universal_newlines=True)
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise OSError('problem running graphviz: \n' + stderr)
with open(savefn, 'w') as f:
f.write(stdout)
return dotgraph
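    # A usage sketch (illustrative only; saving to an image requires the
    # graphviz programs to be installed):
    #
    #     >>> dot_str = frame_transform_graph.to_dot_graph()
    #     >>> frame_transform_graph.to_dot_graph(savefn='graph.svg',
    #     ...                                    savelayout='dot',
    #     ...                                    saveformat='svg')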
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <http://networkx.lanl.gov/>`_
package installed for this to work.
Returns
-------
nxgraph : `networkx.Graph <http://networkx.lanl.gov/reference/classes.graph.html>`_
This `TransformGraph` as a `networkx.Graph`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""
A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third
are ``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use `add_transform` instead of
using this decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(func, fromsys, tosys, priority=priority,
register_graph=self, **kwargs)
return func
return deco
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError('fromsys and tosys must be classes')
self.overlapping_frame_attr_names = overlap = []
if (hasattr(fromsys, 'get_frame_attr_names') and
hasattr(tosys, 'get_frame_attr_names')):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.get_frame_attr_names():
if from_nm in tosys.get_frame_attr_names():
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : a TransformGraph object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : fromsys object
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary that ``tosys.get_frame_attr_names()``
returns. Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : tosys object
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError('func must be callable')
with suppress(TypeError):
sig = signature(func)
kinds = [x.kind for x in sig.parameters.values()]
            # count positional parameters explicitly; ``len()`` of a
            # generator raises TypeError, which would silently skip this check
            n_pos = sum(1 for k in kinds
                        if k in (inspect.Parameter.POSITIONAL_ONLY,
                                 inspect.Parameter.POSITIONAL_OR_KEYWORD))
            if n_pos < 2 and inspect.Parameter.VAR_POSITIONAL not in kinds:
                raise ValueError('provided function does not accept two arguments')
self.func = func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError('the transformation function yielded {0} but '
'should have been of type {1}'.format(res, self.tosys))
if fromcoord.data.differentials and not res.data.differentials:
warn("Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.", AstropyWarning)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""
A coordinate transformation that works like a `FunctionTransform`, but
computes velocity shifts based on the finite-difference relative to one of
the frame attributes. Note that the transform function should *not* change
the differential at all in this case, as any differentials will be
overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first part is simply the existing
    differential, re-oriented (using finite-difference techniques) to
    point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
        re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
        if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The former
        (a central difference) is more accurate, while the latter needs one
        fewer transform evaluation and so is slightly faster.
All other parameters are identical to the initializer for
`FunctionTransform`.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None,
finite_difference_frameattr_name='obstime',
finite_difference_dt=1*u.second,
symmetric_finite_difference=True):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError('Frame attribute name {} is not a frame '
'attribute of {} or {}'.format(value,
self.fromsys,
self.tosys))
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import (CartesianRepresentation,
CartesianDifferential)
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt/2
from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials())
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*halfdt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
backxyz = (fromcoord_cart.xyz -
fromcoord_cart.differentials['s'].d_xyz*halfdt)
back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe)
else:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*dt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
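# A registration sketch (illustrative only; ``Frame1``, ``Frame2`` and the
# function body are hypothetical): velocities are then carried through the
# transform by finite-differencing the ``obstime`` frame attribute.
#
#     @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
#                                      Frame1, Frame2,
#                                      finite_difference_frameattr_name='obstime')
#     def frame1_to_frame2(from_coord, to_frame):
#         ...  # position-only transformation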
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because ``AffineTransform`` and the matrix
transform classes share the ``_apply_transform()`` method, but have
different ``__call__()`` methods. ``StaticMatrixTransform`` passes in a
matrix stored as a class attribute, and both of the matrix transforms pass
in ``None`` for the offset. Hence, user subclasses would likely want to
subclass this (rather than ``AffineTransform``) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (UnitSphericalRepresentation,
CartesianDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential)
data = fromcoord.data
has_velocity = 's' in data.differentials
# list of unit differentials
_unit_diffs = (SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential)
unit_vel_diff = (has_velocity and
isinstance(data.differentials['s'], _unit_diffs))
rad_vel_diff = (has_velocity and
isinstance(data.differentials['s'], RadialDifferential))
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError("Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {0})"
.format(data.__class__))
elif (has_velocity and (unit_vel_diff or rad_vel_diff) and
offset is not None and 's' in offset.differentials):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError("Velocity information stored on coordinate frame "
"is insufficient to do a full-space velocity "
"transformation (differential class: {0})"
.format(data.differentials['s'].__class__))
elif len(data.differentials) > 1:
            # We should never get here because the frame initializer shouldn't
            # allow more than one differential, but this just adds protection
            # for subclasses that somehow skip the checks
raise ValueError("Representation passed to AffineTransform contains"
" multiple associated differentials. Only a single"
" differential with velocity units is presently"
" supported (differentials: {0})."
.format(str(data.differentials)))
        # If the representation is a UnitSphericalRepresentation, and this is
        # just a MatrixTransform, we have to try to turn the differential into
        # a Unit version of the differential (if there is no radial velocity)
        # so that the matrix operation works; the radial-velocity-only case is
        # handled separately just below
if (has_velocity and isinstance(data, UnitSphericalRepresentation) and
not unit_vel_diff and not rad_vel_diff):
# retrieve just velocity differential
unit_diff = data.differentials['s'].represent_as(
data.differentials['s']._unit_differential, data)
            data = data.with_differentials({'s': unit_diff})  # replaces the 's' differential
        # If it's a RadialDifferential, we flat-out ignore the differentials.
        # This is because, by this point (past the validation above), we can
        # only possibly be doing a rotation-only transformation, and that
        # won't change the radial differential. We later add it back in.
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = dict([(k, diff.represent_as(CartesianDifferential, data))
for k, diff in data.differentials.items()])
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = (rep.without_differentials() +
offset.without_differentials())
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials['s'] # already in Cartesian form
if offset is not None and 's' in offset.differentials:
veldiff = veldiff + offset.differentials['s']
newrep = newrep.with_differentials({'s': veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials['s']
_unit_cls = fromcoord.data.differentials['s']._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = dict([(comp, getattr(newdiff, comp))
for comp in newdiff.components])
kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance
diffs = {'s': fromcoord.data.differentials['s'].__class__(
copy=False, **kwargs)}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials['s'].represent_as(
fromcoord.data.differentials['s'].__class__, newrep)
diffs = {'s': newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials['s'].__class__
newrep = newrep.represent_as(fromcoord.data.__class__,
diff_cls._dimensional_differential)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials(
{'s': fromcoord.data.differentials['s']})
return newrep
class AffineTransform(BaseAffineTransform):
"""
    A coordinate transformation specified as a function that yields a 3 x 3
    cartesian transformation matrix and an offset vector.
See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
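    Examples
    --------
    A minimal sketch of a suitable transform function; ``MyFrame`` is a
    hypothetical placeholder frame used only for illustration::
        import numpy as np
        from astropy import units as u
        from astropy.coordinates.representation import CartesianRepresentation
        def shift_func(fromcoord, toframe):
            # no rotation; just a fixed 1 kpc translation along x
            return np.eye(3), CartesianRepresentation([1, 0, 0] * u.kpc)
        trans = AffineTransform(shift_func, MyFrame, ICRS)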
"""
def __init__(self, transform_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(transform_func):
raise TypeError('transform_func is not callable')
self.transform_func = transform_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
M, vec = self.transform_func(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, M, vec)
return toframe.realize_frame(newrep)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
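    Examples
    --------
    A sketch using a plain rotation about the z axis; the frame classes
    (``MyFrame1``, ``MyFrame2``) are placeholders::
        from astropy import units as u
        from astropy.coordinates.matrix_utilities import rotation_matrix
        trans = StaticMatrixTransform(rotation_matrix(30 * u.deg, 'z'),
                                      MyFrame1, MyFrame2)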
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError('Provided matrix is not 3 x 3')
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
newrep = self._apply_transform(fromcoord, self.matrix, None)
return toframe.realize_frame(newrep)
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
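    Examples
    --------
    A sketch of a matrix function whose rotation angle depends on a frame
    attribute; the ``equinox`` attribute and the frames are purely
    illustrative::
        from astropy import units as u
        from astropy.coordinates.matrix_utilities import rotation_matrix
        def my_matrix_func(fromcoord, toframe):
            angle = 50.3 * u.arcsec * (toframe.equinox.jyear - 2000)
            return rotation_matrix(angle, 'z')
        trans = DynamicMatrixTransform(my_matrix_func, MyFrame1, MyFrame2)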
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(matrix_func):
raise TypeError('matrix_func is not callable')
self.matrix_func = matrix_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
M = self.matrix_func(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, M, None)
return toframe.realize_frame(newrep)
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `StaticMatrixTransform` will be collapsed into a
single transformation to speed up the calculation.
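    Examples
    --------
    A sketch chaining two previously-created single-step transforms ``t1``
    (``A`` -> ``B``) and ``t2`` (``B`` -> ``C``); all names are placeholders::
        composite = CompositeTransform([t1, t2], A, C)
        coord_in_c = composite(coord_in_a, C())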
"""
def __init__(self, transforms, fromsys, tosys, priority=1,
register_graph=None, collapse_static_mats=True):
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
        Combines consecutive `StaticMatrixTransform`s in ``transforms`` into
        single transforms and returns the resulting list of transforms.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if (isinstance(lasttrans, StaticMatrixTransform) and
isinstance(currtrans, StaticMatrixTransform)):
combinedmat = np.dot(lasttrans.matrix, currtrans.matrix)
newtrans[-1] = StaticMatrixTransform(combinedmat,
lasttrans.fromsys,
currtrans.tosys)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
            # build an intermediate frame with attributes taken from either
            # `toframe`, or if not there, `fromcoord`, or if not there, the
            # defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.get_frame_attr_names():
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
        # this is safe even in the case where self.transforms is empty, because
        # coordinate objects are immutable, so copying is not needed
return curr_coord
# map class names to colorblind-safe colors
trans_to_color = OrderedDict()
trans_to_color[AffineTransform] = '#555555' # gray
trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish
trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish
trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish
|
71b4883638abe8a83789b6258a083f84b18f20dc1a8dba04f4363068c914cfb9 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import abc
import copy
import inspect
from collections import namedtuple, OrderedDict, defaultdict
import warnings
# Dependencies
import numpy as np
# Project
from ..utils.compat.misc import override__dir__
from ..utils.decorators import lazyproperty, format_doc
from ..utils.exceptions import AstropyWarning
from .. import units as u
from ..utils import (OrderedDescriptorContainer, ShapedLikeNDArray,
check_broadcast)
from .transformations import TransformGraph
from . import representation as r
from .angles import Angle
from .attributes import Attribute
# Import old names for Attributes so we don't break backwards-compatibility
# (some users rely on them being here, although that is not encouraged, as this
# is not the public API location -- see attributes.py).
from .attributes import (
TimeFrameAttribute, QuantityFrameAttribute,
EarthLocationAttribute, CoordinateAttribute,
CartesianRepresentationFrameAttribute) # pylint: disable=W0611
__all__ = ['BaseCoordinateFrame', 'frame_transform_graph',
'GenericFrame', 'RepresentationMapping']
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseRepresentation)):
raise ValueError(
'Representation is {0!r} but must be a BaseRepresentation class '
'or one of the string aliases {1}'.format(
value, list(r.REPRESENTATION_CLASSES)))
return value
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
    that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseDifferential)):
raise ValueError(
'Differential is {0!r} but must be a BaseDifferential class '
'or one of the string aliases {1}'.format(
value, list(r.DIFFERENTIAL_CLASSES)))
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
    **differentials : dict of str or `~astropy.coordinates.BaseDifferential`
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to `None`, it will be
guessed from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
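    Examples
    --------
    A minimal sketch of the string-based usage (output shown schematically)::
        >>> _get_repr_classes('spherical', s='base')  # doctest: +SKIP
        {'base': SphericalRepresentation, 's': SphericalDifferential}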
"""
base = _get_repr_cls(base)
repr_classes = {'base': base}
for name, differential_type in differentials.items():
if differential_type == 'base':
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif (differential_type is not None and
(not isinstance(differential_type, type) or
not issubclass(differential_type, r.BaseDifferential))):
raise ValueError(
'Differential is {0!r} but must be a BaseDifferential class '
'or one of the string aliases {1}'.format(
differential_type, list(r.DIFFERENTIAL_CLASSES)))
repr_classes[name] = differential_type
return repr_classes
def _normalize_representation_type(kwargs):
""" This is added for backwards compatibility: if the user specifies the
old-style argument ``representation``, add it back in to the kwargs dict
as ``representation_type``.
"""
if 'representation' in kwargs:
if 'representation_type' in kwargs:
raise ValueError("Both `representation` and `representation_type` "
"were passed to a frame initializer. Please use "
"only `representation_type` (`representation` is "
"now pending deprecation).")
kwargs['representation_type'] = kwargs.pop('representation')
# Need to subclass ABCMeta as well, so that this meta class can be combined
# with ShapedLikeNDArray below (which is an ABC); without it, one gets
# "TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases"
class FrameMeta(OrderedDescriptorContainer, abc.ABCMeta):
def __new__(mcls, name, bases, members):
if 'default_representation' in members:
default_repr = members.pop('default_representation')
found_default_repr = True
else:
default_repr = None
found_default_repr = False
if 'default_differential' in members:
default_diff = members.pop('default_differential')
found_default_diff = True
else:
default_diff = None
found_default_diff = False
if 'frame_specific_representation_info' in members:
repr_info = members.pop('frame_specific_representation_info')
found_repr_info = True
else:
repr_info = None
found_repr_info = False
# somewhat hacky, but this is the best way to get the MRO according to
# https://mail.python.org/pipermail/python-list/2002-December/167861.html
tmp_cls = super().__new__(mcls, name, bases, members)
# now look through the whole MRO for the class attributes, raw for
# frame_attr_names, and leading underscore for others
for m in (c.__dict__ for c in tmp_cls.__mro__):
if not found_default_repr and '_default_representation' in m:
default_repr = m['_default_representation']
found_default_repr = True
if not found_default_diff and '_default_differential' in m:
default_diff = m['_default_differential']
found_default_diff = True
if (not found_repr_info and
'_frame_specific_representation_info' in m):
# create a copy of the dict so we don't mess with the contents
repr_info = m['_frame_specific_representation_info'].copy()
found_repr_info = True
if found_default_repr and found_default_diff and found_repr_info:
break
else:
raise ValueError(
'Could not find all expected BaseCoordinateFrame class '
'attributes. Are you mis-using FrameMeta?')
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
        # the tuple() call below is necessary because if it is not there,
        # the iteration proceeds in a difficult-to-predict manner in the
        # case that the hash of one of the class objects causes it to be
        # revisited by the iteration. The tuple() call prevents this by
        # making the items iterated over fixed regardless of how the dict
        # changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
cls = _get_repr_cls(cls_or_name)
repr_info[cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(r.SphericalRepresentation,
[RepresentationMapping('lon', 'lon'),
RepresentationMapping('lat', 'lat')])
sph_component_map = {m.reprname: m.framename
for m in repr_info[r.SphericalRepresentation]}
repr_info.setdefault(r.SphericalCosLatDifferential, [
RepresentationMapping(
'd_lon_coslat',
'pm_{lon}_cos{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.SphericalDifferential, [
RepresentationMapping('d_lon',
'pm_{lon}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.CartesianDifferential, [
RepresentationMapping('d_x', 'v_x', u.km/u.s),
RepresentationMapping('d_y', 'v_y', u.km/u.s),
RepresentationMapping('d_z', 'v_z', u.km/u.s)])
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(r.UnitSphericalRepresentation,
repr_info[r.SphericalRepresentation])
repr_info.setdefault(r.UnitSphericalCosLatDifferential,
repr_info[r.SphericalCosLatDifferential])
repr_info.setdefault(r.UnitSphericalDifferential,
repr_info[r.SphericalDifferential])
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
mcls.readonly_prop_factory(members, 'default_representation',
default_repr)
mcls.readonly_prop_factory(members, 'default_differential',
default_diff)
mcls.readonly_prop_factory(members,
'frame_specific_representation_info',
copy.deepcopy(repr_info))
# now set the frame name as lower-case class name, if it isn't explicit
if 'name' not in members:
members['name'] = name.lower()
return super().__new__(mcls, name, bases, members)
@staticmethod
def readonly_prop_factory(members, attr, value):
private_attr = '_' + attr
def getter(self):
return getattr(self, private_attr)
members[private_attr] = value
members[attr] = property(getter)
_RepresentationMappingBase = \
namedtuple('RepresentationMapping',
('reprname', 'framename', 'defaultunit'))
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (to use whatever
the representation's ``recommended_units`` is), or None (to indicate that
no unit mapping should be done).
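    For example, a frame that wants the longitude of its spherical
    representation to be called ``ra`` and default to degrees could use, as a
    sketch (with ``u`` being `astropy.units`)::
        RepresentationMapping('lon', 'ra', u.deg)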
"""
def __new__(cls, reprname, framename, defaultunit='recommended'):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
base_doc = """{__doc__}
Parameters
----------
data : `BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases.
differential_type : `BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray, metaclass=FrameMeta):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential`
proper motion components
* ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion
components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
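    A minimal sketch of a subclass (the names here are illustrative, not a
    real built-in frame)::
        class MyFrame(BaseCoordinateFrame):
            default_representation = r.SphericalRepresentation
            default_differential = r.SphericalCosLatDifferential
            # a frame attribute with a default value
            equinox = TimeFrameAttribute(default='J2000')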
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
_inherit_descriptors_ = (Attribute,)
    # Default empty frame_attributes dict
    frame_attributes = OrderedDict()
def __init__(self, *args, copy=True, representation_type=None,
differential_type=None, **kwargs):
self._attr_names_with_defaults = []
# This is here for backwards compatibility. It should be possible
# to use either the kwarg representation_type, or representation.
# TODO: In future versions, we will raise a deprecation warning here:
if representation_type is not None:
kwargs['representation_type'] = representation_type
_normalize_representation_type(kwargs)
representation_type = kwargs.pop('representation_type', representation_type)
if representation_type is not None or differential_type is not None:
if representation_type is None:
representation_type = self.default_representation
if (inspect.isclass(differential_type) and
issubclass(differential_type, r.BaseDifferential)):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {'s': differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {'s': diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {'s': self.default_differential}
else:
differential_type = {'s': 'base'} # see set_representation_cls()
self.set_representation_cls(representation_type,
**differential_type)
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or
args[0] is None):
representation_data = args.pop(0)
if len(args) > 0:
raise TypeError(
'Cannot create a frame with both a representation object '
'and other positional arguments')
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get('s', None)
if ((differential_data is None and len(diffs) > 0) or
(differential_data is not None and len(diffs) > 1)):
raise ValueError('Multiple differentials are associated '
'with the representation object passed in '
'to the frame initializer. Only a single '
'velocity differential is supported. Got: '
'{0}'.format(diffs))
elif self.representation_type:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get('distance', True) is None:
del repr_kwargs['distance']
if (issubclass(representation_cls, r.SphericalRepresentation)
and 'distance' not in repr_kwargs):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy,
**repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = representation_cls._unit_representation(copy=copy,
**repr_kwargs)
except Exception as e2:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
'{0}()'.format(self.__class__.__name__))
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls('s')
diff_component_names = self.get_representation_component_names('s')
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (hasattr(differential_cls, '_unit_differential') and
'd_distance' not in diff_kwargs):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy,
**diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names('s')
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
'{0}()'.format(self.__class__.__name__))
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
'{0}.__init__ had {1} remaining unhandled arguments'.format(
self.__class__.__name__, len(args)))
if representation_data is None and differential_data is not None:
raise ValueError("Cannot pass in differential component data "
"without positional (representation) data.")
if differential_data:
self._data = representation_data.with_differentials(
{'s': differential_data})
else:
self._data = representation_data # possibly None.
values = {}
for fnm, fdefault in self.get_frame_attr_names().items():
            # Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, '_' + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, '_' + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
'Coordinate frame got unexpected keywords: {0}'.format(
list(kwargs)))
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {fnm: value.shape for fnm, value in values.items()
if getattr(value, 'size', 1) > 1}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError:
raise ValueError(
"non-scalar attributes with inconsistent "
"shapes: {0}".format(shapes))
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
else:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if 's' in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (self._data.__class__.__name__,
self._data.differentials['s'].__class__.__name__,
False)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache['representation'][key] = self._data
@lazyproperty
def cache(self):
"""
Cache for this frame, a dict. It stores anything that should be
computed from the coordinate data (*not* from the frame attributes).
This can be used in functions to store anything that might be
expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
        The coordinate data for this object. If this frame has no data, a
        `ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError('The frame object "{0!r}" does not have '
'associated data'.format(self))
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_names(cls):
return OrderedDict((name, getattr(cls, name))
for name in cls.frame_attributes)
def get_representation_cls(self, which='base'):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
            represent the coordinates; 's' the first derivative with respect to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if not hasattr(self, '_representation'):
self._representation = {'base': self.default_representation,
's': self.default_differential}
if which is not None:
return self._representation[which]
else:
return self._representation
def set_representation_cls(self, base=None, s='base'):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation['base']
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls, fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
        inferred), use the ``set_representation_cls`` method.
""")
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
        ``set_representation_cls`` method.
"""
return self.get_representation_cls('s')
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
# TODO: deprecate these?
@property
def representation(self):
return self.representation_type
@representation.setter
def representation(self, value):
self.representation_type = value
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
repr_attrs = {}
for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values()) +
list(r.DIFFERENTIAL_CLASSES.values())):
repr_attrs[repr_diff_cls] = {'names': [], 'units': []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]['names'].append(c)
# TODO: when "recommended_units" is removed, just directly use
# the default part here.
rec_unit = repr_diff_cls._recommended_units.get(
c, u.deg if issubclass(c_cls, Angle) else None)
repr_attrs[repr_diff_cls]['units'].append(rec_unit)
for repr_diff_cls, mappings in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]['names']
uns = repr_attrs[repr_diff_cls]['units']
comptomap = dict([(m.reprname, m) for m in mappings])
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (isinstance(mapp.defaultunit, str) and
mapp.defaultunit == 'recommended'):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]['names'] = tuple(nms)
repr_attrs[repr_diff_cls]['units'] = tuple(uns)
return repr_attrs
@property
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which='base'):
out = OrderedDict()
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]['names']
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
def get_representation_component_units(self, which='base'):
out = OrderedDict()
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs['names']
repr_units = repr_attrs['units']
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or `None`
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if inspect.isclass(data):
raise TypeError('Class passed as data instead of a representation '
'instance. If you called frame.representation, this'
' returns the representation class. frame.data '
                            'returns the instantiated object - you may want '
                            'to use this instead.')
if copy and data is not None:
data = data.copy()
for attr in self.get_frame_attr_names():
if (attr not in self._attr_names_with_defaults and
attr not in kwargs):
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
        object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : same as this frame
Replica of this object, but possibly with new frame attributes.
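        Examples
        --------
        A sketch, assuming this frame defines an ``obstime`` frame attribute
        (with ``Time`` from `~astropy.time`)::
            new_frame = frame.replicate(obstime=Time('J2015.0'))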
"""
return self._replicate(self.data, copy=copy, **kwargs)
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
        attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : same as this frame
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, representation_type):
"""
Generates a new frame *with new data* from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
representation_type : `BaseRepresentation`
The representation to use as the data for the new frame.
Returns
-------
frameobj : same as this frame
A new object with the same frame attributes as this one, but
with the ``representation`` as the data.
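        Examples
        --------
        A short sketch going from a data-less frame to one with data::
            from astropy import units as u
            from astropy.coordinates import ICRS
            from astropy.coordinates.representation import UnitSphericalRepresentation
            frame = ICRS()  # no data
            rep = UnitSphericalRepresentation(lon=10*u.deg, lat=20*u.deg)
            coord = frame.realize_frame(rep)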
"""
return self._replicate(representation_type)
def represent_as(self, base, s='base', in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword only
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
        # For backwards compatibility (because in_frame_units used to be the
        # 2nd argument), we check to see if `s` is a boolean. If it is, we
        # interpret it as `in_frame_units`, reset `s` to its default, and
        # warn about the position change
if isinstance(s, bool):
warnings.warn("The argument position for `in_frame_units` in "
"`represent_as` has changed. Use as a keyword "
"argument if needed.", AstropyWarning)
in_frame_units = s
s = 'base'
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes['base']
# We only keep velocity information
if 's' in self.data.differentials:
differential_cls = repr_classes['s']
elif s is None or s == 'base':
differential_cls = None
else:
raise TypeError('Frame data has no associated differentials '
'(i.e. the frame has no velocity data) - '
'represent_as() only accepts a new '
'representation.')
if differential_cls:
cache_key = (representation_cls.__name__,
differential_cls.__name__, in_frame_units)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache['representation'].get(cache_key)
if not cached_repr:
if differential_cls:
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls,
differential_cls)
diff = data.differentials['s'] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = dict((comp, getattr(data, comp))
for comp in data.components)
for comp, new_attr_unit in zip(data.components, new_attrs['units']):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials['s']
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = dict((comp, getattr(diff, comp))
for comp in diff.components)
for comp, new_attr_unit in zip(diff.components,
new_attrs['units']):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (isinstance(data_diff,
(r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential)) and
comp not in data_diff.__class__.attr_classes):
continue
elif (isinstance(data_diff, r.RadialDifferential) and
comp not in data_diff.__class__.attr_classes):
continue
if new_attr_unit and hasattr(diff, comp):
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({'s': diff})
self.cache['representation'][cache_key] = data
return self.cache['representation'][cache_key]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : class or frame object or SkyCoord object
The frame to transform this coordinate frame into.
Returns
-------
transframe
A new object with the coordinate data represented in the
``newframe`` system.
Raises
------
ValueError
If there is no possible transformation route.
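        Examples
        --------
        A short sketch (output abbreviated)::
            >>> from astropy import units as u  # doctest: +SKIP
            >>> from astropy.coordinates import ICRS, Galactic  # doctest: +SKIP
            >>> ICRS(10*u.deg, 20*u.deg).transform_to(Galactic)  # doctest: +SKIP
            <Galactic Coordinate: (l, b) in deg ...>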
"""
from .errors import ConvertError
if self._data is None:
raise ValueError('Cannot transform a frame with no data')
if (getattr(self.data, 'differentials', None) and
hasattr(self, 'obstime') and hasattr(new_frame, 'obstime') and
np.any(self.obstime != new_frame.obstime)):
raise NotImplementedError('You cannot transform a frame that has '
'velocities to another frame at a '
'different obstime. If you think this '
'should (or should not) be possible, '
'please comment at https://github.com/astropy/astropy/issues/6280')
if inspect.isclass(new_frame):
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, '_sky_coord_frame'):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__,
new_frame.__class__)
if trans is None:
            if new_frame.__class__ is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = 'Cannot transform from {0} to {1}'
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : class or frame object
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return 'same'
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : BaseCoordinateFrame
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.get_frame_attr_names():
if np.any(getattr(self, frame_attr_name) !=
getattr(other, frame_attr_name)):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't a frame")
else:
return False
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = ' ({0})'.format(frameattrs)
if data_repr:
return '<{0} Coordinate{1}: {2}>'.format(self.__class__.__name__,
frameattrs, data_repr)
else:
return '<{0} Frame{1}>'.format(self.__class__.__name__,
frameattrs)
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ''
if self.representation:
if (hasattr(self.representation, '_unit_representation') and
isinstance(self.data, self.representation._unit_representation)):
rep_cls = self.data.__class__
else:
rep_cls = self.representation
if 's' in self.data.differentials:
dif_cls = self.get_representation_cls('s')
dif_data = self.data.differentials['s']
if isinstance(dif_data, (r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential)):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
for nmpref, nmrepr in self.representation_component_names.items():
data_repr = data_repr.replace(nmrepr, nmpref)
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith('<' + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2):-1]
else:
data_repr = 'Data:\n' + data_repr
if 's' in self.data.differentials:
data_repr_spl = data_repr.split('\n')
if 'has differentials' in data_repr_spl[-1]:
diffrepr = repr(data.differentials['s']).split('\n')
if diffrepr[0].startswith('<'):
diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:])
for frm_nm, rep_nm in self.get_representation_component_names('s').items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith('>'):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = '\n'.join(diffrepr)
data_repr = '\n'.join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
return ', '.join([attrnm + '=' + str(getattr(self, attrnm))
for attrnm in self.get_frame_attr_names()])
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, '_representation'):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = '_' + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, 'size', 1) > 1:
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
                    # flatten should also copy for a single-element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [getattr(new, '_' + attr).shape
for attr in new.frame_attributes
if (attr not in new._attr_names_with_defaults and
getattr(getattr(new, '_' + attr), 'size', 1) > 1)]
if shapes:
new._no_data_shape = (check_broadcast(*shapes)
if len(shapes) > 1 else shapes[0])
else:
new._no_data_shape = ()
return new
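    # Sketch of the shape-changing methods built on _apply (comments only;
    # assumes the built-in ICRS frame):
    #
    #     >>> import numpy as np, astropy.units as u
    #     >>> from astropy.coordinates import ICRS
    #     >>> c = ICRS(ra=np.arange(6) * u.deg, dec=np.zeros(6) * u.deg)
    #     >>> c.reshape(2, 3).shape
    #     (2, 3)
    #     >>> c[::2].shape            # __getitem__ also routes through _apply
    #     (3,)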
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
dir_values = set(self.representation_component_names)
dir_values |= set(self.get_representation_component_names('s'))
return dir_values
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith('_'):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
self.data # this raises the "no data" error by design - doing it
# this way means we don't have to replicate the error message here
rep = self.represent_as(self.representation_type,
in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names('s')
if attr in diff_names:
if self._data is None:
self.data # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(in_frame_units=True,
**self.get_representation_cls(None))
val = getattr(rep.differentials['s'], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith('_'):
if hasattr(self, 'representation_info'):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr['names'])
if attr in repr_attr_names:
raise AttributeError(
'Cannot set any frame attribute {0}'.format(attr))
super().__setattr__(attr, value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
        unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat,
other_unit_sph.lon, other_unit_sph.lat)
return Angle(sep, unit=u.degree)
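    # Quick sketch (comments only; assumes the built-in ICRS frame):
    #
    #     >>> import astropy.units as u
    #     >>> from astropy.coordinates import ICRS
    #     >>> c1 = ICRS(ra=10 * u.deg, dec=0 * u.deg)
    #     >>> c2 = ICRS(ra=11 * u.deg, dec=0 * u.deg)
    #     >>> c1.separation(c2)       # an Angle of ~1 deg
    #     <Angle 1. deg>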
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if issubclass(other_in_self_system.__class__, r.UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation)
other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation)
return Distance((self_car - other_car).norm())
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('cartesian', in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`SphericalCosLatDifferential` for the velocity data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`CartesianDifferential` object. This is equivalent to calling
``self.cartesian.differentials['s']``.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
try:
v = self.cartesian.differentials['s']
        except Exception as e:
            raise ValueError('Could not retrieve a Cartesian velocity. Your '
                             'frame must include velocity information for this '
                             'to work.') from e
return v
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
pm_lon = sph.differentials['s'].d_lon_coslat
pm_lat = sph.differentials['s'].d_lat
return np.stack((pm_lon.value,
pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', in_frame_units=True)
return sph.differentials['s'].d_distance
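    # Sketch of the velocity shorthands above (comments only; assumes the
    # built-in ICRS frame, which accepts proper-motion and radial-velocity
    # keywords):
    #
    #     >>> import astropy.units as u
    #     >>> from astropy.coordinates import ICRS
    #     >>> c = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.kpc,
    #     ...          pm_ra_cosdec=1 * u.mas/u.yr, pm_dec=2 * u.mas/u.yr,
    #     ...          radial_velocity=10 * u.km/u.s)
    #     >>> c.velocity              # CartesianDifferential, km / s
    #     >>> c.proper_motion         # Quantity of shape (2,), mas / yr
    #     >>> c.radial_velocity       # <Quantity 10. km / s>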
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = OrderedDict()
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, '_' + name, default)
super().__init__(None)
def __getattr__(self, name):
if '_' + name in self.__dict__:
return getattr(self, '_' + name)
else:
raise AttributeError('no {0}'.format(name))
def __setattr__(self, name, value):
if name in self.get_frame_attr_names():
raise AttributeError("can't set frame attribute '{0}'".format(name))
else:
super().__setattr__(name, value)
|
9b06a5a35f0abafdcb1dc878f91c8f2fb7c5c46c54dc2dd3bae1eddb60bd4128 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
from collections import namedtuple
import numpy as np
from . import angle_utilities as util
from .. import units as u
from ..utils import isiterable
from ..utils.compat import NUMPY_LT_1_14_1, NUMPY_LT_1_14_2
__all__ = ['Angle', 'Latitude', 'Longitude']
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's'))
dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's'))
signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's'))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats::
Angle('10.2345d')
Angle(['10.2345d', '-20d'])
Angle('1:2:30.43 degrees')
Angle('1 2 0 hours')
Angle(np.arange(1, 8), unit=u.deg)
Angle('1°2′3″')
Angle('1d2m3.4s')
Angle('-1h2m3s')
Angle('-1h2.5m')
Angle('-1:2.5', unit=u.deg)
Angle((10, 11, 12), unit='hourangle') # (h, m, s)
Angle((-1, 2, 3), unit=u.deg) # (d, m, s)
Angle(10.2345 * u.deg)
Angle(Angle(10.2345 * u.deg))
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : `~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=None, copy=True):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = util.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, angle_unit)
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif (isiterable(angle) and
not (isinstance(angle, np.ndarray) and
angle.dtype.kind not in 'SUVO')):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return util.hms_to_hours(*angle)
elif unit == u.degree:
return util.dms_to_degrees(*angle)
else:
raise u.UnitsError("Can not parse '{0}' as unit '{1}'"
.format(angle, unit))
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit is u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*util.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*util.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(np.sign(self.degree),
*util.degrees_to_dms(np.abs(self.degree)))
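    # Sketch of the dms/signed_dms distinction for negative angles
    # (comments only; component values shown approximately):
    #
    #     >>> import astropy.units as u
    #     >>> a = Angle(-1.25 * u.deg)
    #     >>> a.dms           # components all negative: (-1.0, -15.0, -0.0)
    #     >>> a.signed_dms    # sign pulled out: (-1.0, 1.0, 15.0, 0.0)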
def to_string(self, unit=None, decimal=False, sep='fromunit',
precision=None, alwayssign=False, pad=False,
fields=3, format=None):
""" A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `True`, a decimal representation will be used, otherwise
the returned string will be in sexagesimal form.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
sep='-:' would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
None: {
u.degree: 'dms',
u.hourangle: 'hms'},
'latex': {
u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
u.hourangle: [r'^\mathrm{h}', r'^\mathrm{m}', r'^\mathrm{s}']},
'unicode': {
u.degree: '°′″',
u.hourangle: 'ʰᵐˢ'}
}
if sep == 'fromunit':
if format not in separators:
raise ValueError("Unknown format '{0}'".format(format))
seps = separators[format]
if unit in seps:
sep = seps[unit]
# Create an iterator so we can format each element of what
# might be an array.
if unit is u.degree:
if decimal:
values = self.degree
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'dms'
values = self.degree
func = lambda x: util.degrees_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit is u.hourangle:
if decimal:
values = self.hour
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'hms'
values = self.hour
func = lambda x: util.hours_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit.is_equivalent(u.radian):
if decimal:
values = self.to_value(unit)
if precision is not None:
func = ("{0:1." + str(precision) + "f}").format
else:
func = "{0:g}".format
elif sep == 'fromunit':
values = self.to_value(unit)
unit_string = unit.to_string(format=format)
if format == 'latex':
unit_string = unit_string[1:-1]
if precision is not None:
def plain_unit_format(val):
return ("{0:0." + str(precision) + "f}{1}").format(
val, unit_string)
func = plain_unit_format
else:
def plain_unit_format(val):
return "{0:g}{1}".format(val, unit_string)
func = plain_unit_format
else:
raise ValueError(
"'{0}' can not be represented in sexagesimal "
"notation".format(
unit.name))
else:
raise u.UnitsError(
"The unit value provided is not an angular unit.")
def do_format(val):
s = func(float(val))
if alwayssign and not s.startswith('-'):
s = '+' + s
if format == 'latex':
s = '${0}$'.format(s)
return s
format_ufunc = np.vectorize(do_format, otypes=['U'])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
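    # Formatting sketch (comments only; exact padding/rounding may vary
    # slightly with the input value):
    #
    #     >>> import astropy.units as u
    #     >>> a = Angle(10.2345 * u.deg)
    #     >>> a.to_string()                           # '10d14m04.2s'
    #     >>> a.to_string(sep=':', precision=2)       # '10:14:04.20'
    #     >>> a.to_string(decimal=True, precision=3)  # '10.234'
    #     >>> a.to_string(unit=u.hourangle)           # '0h40m56.28s'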
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `Angle` object is wrapped in
place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : str, `Angle`, angular `~astropy.units.Quantity`
Specifies a single value for the wrap angle. This can be any
object that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `Angle`
Returns
-------
out : Angle or `None`
If ``inplace is False`` (default), return new `Angle` object
with angles wrapped accordingly. Otherwise wrap in place and
return `None`.
"""
wrap_angle = Angle(wrap_angle) # Convert to an Angle
wrapped = np.mod(self - wrap_angle, 360.0 * u.deg) - (360.0 * u.deg - wrap_angle)
if inplace:
self[()] = wrapped
else:
return wrapped
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies lower bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies upper bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
if NUMPY_LT_1_14_1 or not NUMPY_LT_1_14_2:
def formatter(x):
return x.to_string(format=format)
else:
            # In numpy 1.14.1, array2string formatters get passed plain numpy scalars instead
# of subclass array scalars, so we need to recreate an array scalar.
def formatter(x):
return self._new_view(x).to_string(format=format)
return np.array2string(self, formatter={'all': formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format='latex')
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitute and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, Angle) else obj
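# Sketch of the downgrade behavior (comments only):
#
#     >>> import astropy.units as u
#     >>> lon = Longitude(350 * u.deg)
#     >>> type(lon / 2.)          # plain Angle: wrap semantics no longer apply
#     <class 'astropy.coordinates.angles.Angle'>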
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
    angle : array, list, scalar, `~astropy.units.Quantity`, or `Angle`
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
lower = u.degree.to(angles.unit, -90.0)
upper = u.degree.to(angles.unit, 90.0)
if np.any(angles.value < lower) or np.any(angles.value > upper):
raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
'got {0}'.format(angles.to(u.degree)))
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_wrap__(self, obj, context=None):
obj = super().__array_wrap__(obj, context=context)
return _no_angle_subclass(obj)
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
    angle : array, list, scalar, `~astropy.units.Quantity`, or
        :class:`~astropy.coordinates.Angle`
        The angle value(s). If a tuple, will be interpreted as
        ``(h, m, s)`` or ``(d, m, s)`` depending
on ``unit``. If a string, it will be interpreted following the
rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : :class:`~astropy.coordinates.Angle` or equivalent, or None
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError("A Longitude angle cannot be created from "
"a Latitude angle.")
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle)
self.wrap_angle = wrap_angle
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_internal()
def _wrap_internal(self):
"""
Wrap the internal values in the Longitude object. Using the
:meth:`~astropy.coordinates.Angle.wrap_at` method causes
recursion.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = self.wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.value
# Do the wrapping, but only if any angles need to be wrapped
if np.any(self_angle < wrap_angle_floor) or np.any(self_angle >= wrap_angle):
wrapped = np.mod(self_angle - wrap_angle, a360) + wrap_angle_floor
value = u.Quantity(wrapped, self.unit)
super().__setitem__((), value)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value)
self._wrap_internal()
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, '_wrap_angle',
self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_wrap__(self, obj, context=None):
obj = super().__array_wrap__(obj, context=context)
return _no_angle_subclass(obj)
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
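# Wrapping sketch for Longitude (comments only):
#
#     >>> import astropy.units as u
#     >>> lon = Longitude([-10, 30, 370] * u.deg)
#     >>> lon.degree                     # wrapped into [0, 360) by default
#     array([350.,  30.,  10.])
#     >>> lon.wrap_angle = 180 * u.deg   # re-wraps in place into [-180, 180)
#     >>> lon.degree
#     array([-10.,  30.,  10.])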
|
f15764fcbd675f3b97ac70596056484f7f2ce8c37641d5c5f3cb7aa4a84d09f7 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
import warnings
# Project
from .. import units as u
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils import OrderedDescriptor, ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute(OrderedDescriptor):
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
_class_attribute_ = 'frame_attributes'
_name_attribute_ = 'name'
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None)
if instance_shape is not None and (getattr(out, 'size', 1) > 1 and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {0} should be scalar or have shape {1}, "
"but is has shape {2} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
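    # A minimal sketch of defining a frame with a custom attribute
    # (comments only; ``MyFrame`` is hypothetical, and QuantityAttribute
    # is defined further down in this module):
    #
    #     import astropy.units as u
    #     from astropy.coordinates import BaseCoordinateFrame
    #     from astropy.coordinates.representation import SphericalRepresentation
    #
    #     class MyFrame(BaseCoordinateFrame):
    #         default_representation = SphericalRepresentation
    #         scale = QuantityAttribute(default=1 * u.km)
    #
    #     MyFrame().scale                           # -> <Quantity 1. km>
    #     MyFrame(scale=2 * u.km).scale             # -> <Quantity 2. km>
    #     MyFrame().is_frame_attr_default('scale')  # -> True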
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from ..time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
'Invalid time input {0}={1!r}\n{2}'.format(self.name,
value, err))
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {0} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None, shape=None):
super().__init__(default, secondary_attribute)
self.unit = unit
self.shape = shape
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if np.all(value == 0) and self.unit is not None:
return u.Quantity(np.zeros(self.shape), self.unit), True
else:
if not hasattr(value, 'unit'):
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
raise ValueError('The provided value has shape "{0}", but '
'should have shape "{1}"'.format(value.shape,
self.shape))
converted = oldvalue is not value
return value, converted
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input can be converted to an
        `~astropy.coordinates.EarthLocation` (or is the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into an '
                                 'EarthLocationAttribute, but it does not have a '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS)
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
low-level frame class *or* a `~astropy.coordinates.SkyCoord`, but will
always be converted to the low-level frame class when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is an instance of the expected frame class, or
        can be transformed to it (or is the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into a '
                                 'CoordinateAttribute, but it does not have a '
'"transform_to" method'.format(value))
transformedobj = value.transform_to(self._frame)
if hasattr(transformedobj, 'frame'):
transformedobj = transformedobj.frame
return transformedobj, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
        TypeError
            If the input is not an instance of one of the allowed classes.
"""
if not isinstance(value, self.allowed_classes):
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {0}. Allowed '
'classes are: {1}'
.format(value.__class__,
self.allowed_classes))
return value, True
# Backwards-compatibility: these are the only classes that were previously
# released in v1.3
class FrameAttribute(Attribute):
def __init__(self, *args, **kwargs):
warnings.warn("FrameAttribute has been renamed to Attribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class TimeFrameAttribute(TimeAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("TimeFrameAttribute has been renamed to TimeAttribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class QuantityFrameAttribute(QuantityAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("QuantityFrameAttribute has been renamed to "
"QuantityAttribute.", AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class CartesianRepresentationFrameAttribute(CartesianRepresentationAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("CartesianRepresentationFrameAttribute has been renamed "
"to CartesianRepresentationAttribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
|
f6ddee412d9e18cff51a94c5bab5c1bfa456f42ee0a2885f910983073b930f78 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Standard library
import re
import textwrap
import warnings
from datetime import datetime
from urllib.request import urlopen
# Third-party
from .. import time as atime
from ..utils.console import color_print, _color_text
from . import get_sun
__all__ = []
class HumanError(ValueError): pass
class CelestialError(ValueError): pass
def get_sign(dt):
"""
"""
    if ((int(dt.month) == 12 and int(dt.day) >= 22) or (int(dt.month) == 1 and int(dt.day) <= 19)):
        zodiac_sign = "capricorn"
    elif ((int(dt.month) == 1 and int(dt.day) >= 20) or (int(dt.month) == 2 and int(dt.day) <= 17)):
        zodiac_sign = "aquarius"
    elif ((int(dt.month) == 2 and int(dt.day) >= 18) or (int(dt.month) == 3 and int(dt.day) <= 19)):
        zodiac_sign = "pisces"
    elif ((int(dt.month) == 3 and int(dt.day) >= 20) or (int(dt.month) == 4 and int(dt.day) <= 19)):
        zodiac_sign = "aries"
    elif ((int(dt.month) == 4 and int(dt.day) >= 20) or (int(dt.month) == 5 and int(dt.day) <= 20)):
        zodiac_sign = "taurus"
    elif ((int(dt.month) == 5 and int(dt.day) >= 21) or (int(dt.month) == 6 and int(dt.day) <= 20)):
        zodiac_sign = "gemini"
    elif ((int(dt.month) == 6 and int(dt.day) >= 21) or (int(dt.month) == 7 and int(dt.day) <= 22)):
        zodiac_sign = "cancer"
    elif ((int(dt.month) == 7 and int(dt.day) >= 23) or (int(dt.month) == 8 and int(dt.day) <= 22)):
        zodiac_sign = "leo"
    elif ((int(dt.month) == 8 and int(dt.day) >= 23) or (int(dt.month) == 9 and int(dt.day) <= 22)):
        zodiac_sign = "virgo"
    elif ((int(dt.month) == 9 and int(dt.day) >= 23) or (int(dt.month) == 10 and int(dt.day) <= 22)):
        zodiac_sign = "libra"
    elif ((int(dt.month) == 10 and int(dt.day) >= 23) or (int(dt.month) == 11 and int(dt.day) <= 21)):
        zodiac_sign = "scorpio"
    elif ((int(dt.month) == 11 and int(dt.day) >= 22) or (int(dt.month) == 12 and int(dt.day) <= 21)):
        zodiac_sign = "sagittarius"
    return zodiac_sign
_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
"cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really need to talk to the IAU...
_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}
_ZODIAC = ((1900, "rat"), (1901, "ox"), (1902, "tiger"),
(1903, "rabbit"), (1904, "dragon"), (1905, "snake"),
(1906, "horse"), (1907, "goat"), (1908, "monkey"),
(1909, "rooster"), (1910, "dog"), (1911, "pig"))
# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]
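# Quick check of the 12-year cycle (comments only):
#
#     >>> _get_zodiac(1900)
#     'rat'
#     >>> _get_zodiac(1990)   # (1990 - 1900) % 12 == 6 -> "horse"
#     'horse'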
def horoscope(birthday, corrected=True, chinese=False):
"""
Enter your birthday as an `astropy.time.Time` object and
receive a mystical horoscope about things to come.
    Parameters
    ----------
birthday : `astropy.time.Time` or str
Your birthday as a `datetime.datetime` or `astropy.time.Time` object
or "YYYY-MM-DD"string.
corrected : bool
Whether to account for the precession of the Earth instead of using the
ancient Greek dates for the signs. After all, you do want your *real*
horoscope, not a cheap inaccurate approximation, right?
chinese : bool
Chinese annual zodiac wisdom instead of Western one.
Returns
-------
Infinite wisdom, condensed into astrologically precise prose.
Notes
-----
This function was implemented on April 1. Take note of that date.
"""
today = datetime.now()
err_msg = "Invalid response from celestial gods (failed to load horoscope)."
special_words = {
'([sS]tar[s^ ]*)': 'yellow',
'([yY]ou[^ ]*)': 'magenta',
'([pP]lay[^ ]*)': 'blue',
'([hH]eart)': 'red',
'([fF]ate)': 'lightgreen',
}
if isinstance(birthday, str):
birthday = datetime.strptime(birthday, '%Y-%m-%d')
if chinese:
from bs4 import BeautifulSoup
# TODO: Make this more accurate by using the actual date, not just year
# Might need third-party tool like https://pypi.python.org/pypi/lunardate
zodiac_sign = _get_zodiac(birthday.year)
url = ('https://www.horoscope.com/us/horoscopes/yearly/'
'{}-chinese-horoscope-{}.aspx'.format(today.year, zodiac_sign))
summ_title_sfx = 'in {}'.format(today.year)
try:
with urlopen(url) as f:
try:
doc = BeautifulSoup(f, 'html.parser')
# TODO: Also include Love, Family & Friends, Work, Money, More?
item = doc.find(id='overview')
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
except Exception:
raise CelestialError(err_msg)
else:
from xml.dom.minidom import parse
birthday = atime.Time(birthday)
if corrected:
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Ignore ErfaWarning
zodiac_sign = get_sun(birthday).get_constellation().lower()
zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
if zodiac_sign not in _VALID_SIGNS:
raise HumanError('On your birthday the sun was in {}, which is not '
'a sign of the zodiac. You must not exist. Or '
'maybe you can settle for '
'corrected=False.'.format(zodiac_sign.title()))
else:
zodiac_sign = get_sign(birthday.to_datetime())
url = "http://www.findyourfate.com/rss/dailyhoroscope-feed.php?sign={sign}&id=45"
summ_title_sfx = 'on {}'.format(today.strftime("%Y-%m-%d"))
with urlopen(url.format(sign=zodiac_sign.capitalize())) as f:
try:
doc = parse(f)
item = doc.getElementsByTagName('item')[0]
desc = item.getElementsByTagName('description')[0].childNodes[0].nodeValue
except Exception:
raise CelestialError(err_msg)
print("*"*79)
color_print("Horoscope for {} {}:".format(zodiac_sign.capitalize(), summ_title_sfx),
'green')
print("*"*79)
for block in textwrap.wrap(desc, 79):
split_block = block.split()
for i, word in enumerate(split_block):
for re_word in special_words.keys():
match = re.search(re_word, word)
if match is None:
continue
split_block[i] = _color_text(match.groups()[0], special_words[re_word])
print(" ".join(split_block))
def inject_horoscope():
import astropy
astropy._yourfuture = horoscope
inject_horoscope()
|
7558a0b533e0b6727e6b065eb5765a65c5ef541e4106ba92fd4a28769059e38f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to this file.
# angle_parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'SIGN UINT UFLOAT COLON DEGREE HOUR MINUTE SECOND SIMPLE_UNIT\n angle : hms\n | dms\n | arcsecond\n | arcminute\n | simple\n \n sign : SIGN\n |\n \n ufloat : UFLOAT\n | UINT\n \n colon : sign UINT COLON ufloat\n | sign UINT COLON UINT COLON ufloat\n \n spaced : sign UINT ufloat\n | sign UINT UINT ufloat\n \n generic : colon\n | spaced\n | sign UFLOAT\n | sign UINT\n \n hms : sign UINT HOUR\n | sign UINT HOUR ufloat\n | sign UINT HOUR UINT MINUTE\n | sign UINT HOUR UFLOAT MINUTE\n | sign UINT HOUR UINT MINUTE ufloat\n | sign UINT HOUR UINT MINUTE ufloat SECOND\n | generic HOUR\n \n dms : sign UINT DEGREE\n | sign UINT DEGREE ufloat\n | sign UINT DEGREE UINT MINUTE\n | sign UINT DEGREE UFLOAT MINUTE\n | sign UINT DEGREE UINT MINUTE ufloat\n | sign UINT DEGREE UINT MINUTE ufloat SECOND\n | generic DEGREE\n \n simple : generic\n | generic SIMPLE_UNIT\n \n arcsecond : generic SECOND\n \n arcminute : generic MINUTE\n '
_lr_action_items = {'SIGN':([0,],[9,]),'UINT':([0,7,9,12,19,20,23,24,35,37,39,],[-7,12,-6,19,25,27,30,33,25,25,25,]),'UFLOAT':([0,7,9,12,19,20,23,24,35,37,39,],[-7,13,-6,22,22,29,32,22,22,22,22,]),'$end':([1,2,3,4,5,6,8,10,11,12,13,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,],[0,-1,-2,-3,-4,-5,-32,-14,-15,-17,-16,-24,-31,-34,-35,-33,-9,-18,-12,-8,-25,-9,-13,-9,-19,-8,-9,-26,-8,-9,-10,-20,-21,-27,-28,-22,-29,-11,-23,-30,]),'HOUR':([8,10,11,12,13,19,21,22,25,26,33,34,42,],[14,-14,-15,20,-16,-9,-12,-8,-9,-13,-9,-10,-11,]),'DEGREE':([8,10,11,12,13,19,21,22,25,26,33,34,42,],[15,-14,-15,23,-16,-9,-12,-8,-9,-13,-9,-10,-11,]),'SECOND':([8,10,11,12,13,19,21,22,25,26,33,34,40,41,42,],[16,-14,-15,-17,-16,-9,-12,-8,-9,-13,-9,-10,43,44,-11,]),'MINUTE':([8,10,11,12,13,19,21,22,25,26,27,29,30,32,33,34,42,],[17,-14,-15,-17,-16,-9,-12,-8,-9,-13,35,36,37,38,-9,-10,-11,]),'SIMPLE_UNIT':([8,10,11,12,13,19,21,22,25,26,33,34,42,],[18,-14,-15,-17,-16,-9,-12,-8,-9,-13,-9,-10,-11,]),'COLON':([12,33,],[24,39,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'angle':([0,],[1,]),'hms':([0,],[2,]),'dms':([0,],[3,]),'arcsecond':([0,],[4,]),'arcminute':([0,],[5,]),'simple':([0,],[6,]),'sign':([0,],[7,]),'generic':([0,],[8,]),'colon':([0,],[10,]),'spaced':([0,],[11,]),'ufloat':([12,19,20,23,24,35,37,39,],[21,26,28,31,34,40,41,42,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> angle","S'",1,None,None,None),
('angle -> hms','angle',1,'p_angle','angle_utilities.py',157),
('angle -> dms','angle',1,'p_angle','angle_utilities.py',158),
('angle -> arcsecond','angle',1,'p_angle','angle_utilities.py',159),
('angle -> arcminute','angle',1,'p_angle','angle_utilities.py',160),
('angle -> simple','angle',1,'p_angle','angle_utilities.py',161),
('sign -> SIGN','sign',1,'p_sign','angle_utilities.py',167),
('sign -> <empty>','sign',0,'p_sign','angle_utilities.py',168),
('ufloat -> UFLOAT','ufloat',1,'p_ufloat','angle_utilities.py',177),
('ufloat -> UINT','ufloat',1,'p_ufloat','angle_utilities.py',178),
('colon -> sign UINT COLON ufloat','colon',4,'p_colon','angle_utilities.py',184),
('colon -> sign UINT COLON UINT COLON ufloat','colon',6,'p_colon','angle_utilities.py',185),
('spaced -> sign UINT ufloat','spaced',3,'p_spaced','angle_utilities.py',194),
('spaced -> sign UINT UINT ufloat','spaced',4,'p_spaced','angle_utilities.py',195),
('generic -> colon','generic',1,'p_generic','angle_utilities.py',204),
('generic -> spaced','generic',1,'p_generic','angle_utilities.py',205),
('generic -> sign UFLOAT','generic',2,'p_generic','angle_utilities.py',206),
('generic -> sign UINT','generic',2,'p_generic','angle_utilities.py',207),
('hms -> sign UINT HOUR','hms',3,'p_hms','angle_utilities.py',216),
('hms -> sign UINT HOUR ufloat','hms',4,'p_hms','angle_utilities.py',217),
('hms -> sign UINT HOUR UINT MINUTE','hms',5,'p_hms','angle_utilities.py',218),
('hms -> sign UINT HOUR UFLOAT MINUTE','hms',5,'p_hms','angle_utilities.py',219),
('hms -> sign UINT HOUR UINT MINUTE ufloat','hms',6,'p_hms','angle_utilities.py',220),
('hms -> sign UINT HOUR UINT MINUTE ufloat SECOND','hms',7,'p_hms','angle_utilities.py',221),
('hms -> generic HOUR','hms',2,'p_hms','angle_utilities.py',222),
('dms -> sign UINT DEGREE','dms',3,'p_dms','angle_utilities.py',235),
('dms -> sign UINT DEGREE ufloat','dms',4,'p_dms','angle_utilities.py',236),
('dms -> sign UINT DEGREE UINT MINUTE','dms',5,'p_dms','angle_utilities.py',237),
('dms -> sign UINT DEGREE UFLOAT MINUTE','dms',5,'p_dms','angle_utilities.py',238),
('dms -> sign UINT DEGREE UINT MINUTE ufloat','dms',6,'p_dms','angle_utilities.py',239),
('dms -> sign UINT DEGREE UINT MINUTE ufloat SECOND','dms',7,'p_dms','angle_utilities.py',240),
('dms -> generic DEGREE','dms',2,'p_dms','angle_utilities.py',241),
('simple -> generic','simple',1,'p_simple','angle_utilities.py',254),
('simple -> generic SIMPLE_UNIT','simple',2,'p_simple','angle_utilities.py',255),
('arcsecond -> generic SECOND','arcsecond',2,'p_arcsecond','angle_utilities.py',264),
('arcminute -> generic MINUTE','arcminute',2,'p_arcminute','angle_utilities.py',270),
]
|
7383afce4bb71edb922dbf8a737037fc1af6bada26887e08d66d34425423786d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import contextlib
import pathlib
import re
import sys
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ['register_reader', 'register_writer', 'register_identifier',
'identify_format', 'get_reader', 'get_writer', 'read', 'write',
'get_formats', 'IORegistryError', 'delay_doc_updates']
__doctest_skip__ = ['register_identifier']
_readers = OrderedDict()
_writers = OrderedDict()
_identifiers = OrderedDict()
PATH_TYPES = (str, pathlib.Path)
class IORegistryError(Exception):
"""Custom error for registry clashes.
"""
pass
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarily delayed are added
# to this set.
_delayed_docs_classes = set()
@contextlib.contextmanager
def delay_doc_updates(cls):
"""Contextmanager to disable documentation updates when registering
reader and writer. The documentation is only built once when the
contextmanager exits.
.. versionadded:: 1.3
Parameters
----------
cls : class
Class for which the documentation updates should be delayed.
Notes
-----
    Registering multiple readers and writers can cause significant overhead
    because the documentation of the corresponding ``read`` and ``write``
    methods is rebuilt every time.
.. warning::
This contextmanager is experimental and may be replaced by a more
general approach.
Examples
--------
see for example the source code of ``astropy.table.__init__``.
"""
_delayed_docs_classes.add(cls)
yield
_delayed_docs_classes.discard(cls)
_update__doc__(cls, 'read')
_update__doc__(cls, 'write')
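# A minimal usage sketch (``MyTable`` and the reader/writer functions are
# hypothetical placeholders, not part of this module):
#
#     with delay_doc_updates(MyTable):
#         register_reader('fmt-a', MyTable, read_fmt_a)
#         register_reader('fmt-b', MyTable, read_fmt_b)
#         register_writer('fmt-a', MyTable, write_fmt_a)
#     # The read/write docstrings are rebuilt only once, on context exit.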
def get_formats(data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a Table.
Parameters
----------
data_class : classobj, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
        Search only for readers (``"Read"``) or writers (``"Write"``). If None,
        search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : Table
Table of available I/O formats.
"""
from ..table import Table
format_classes = sorted(set(_readers) | set(_writers), key=itemgetter(0))
rows = []
for format_class in format_classes:
if (data_class is not None and not _is_best_match(
data_class, format_class[1], format_classes)):
continue
has_read = 'Yes' if format_class in _readers else 'No'
has_write = 'Yes' if format_class in _writers else 'No'
has_identify = 'Yes' if format_class in _identifiers else 'No'
# Check if this is a short name (e.g. 'rdb') which is deprecated in
# favor of the full 'ascii.rdb'.
ascii_format_class = ('ascii.' + format_class[0], format_class[1])
deprecated = 'Yes' if ascii_format_class in format_classes else ''
rows.append((format_class[1].__name__, format_class[0], has_read,
has_write, has_identify, deprecated))
if readwrite is not None:
if readwrite == 'Read':
rows = [row for row in rows if row[2] == 'Yes']
elif readwrite == 'Write':
rows = [row for row in rows if row[3] == 'Yes']
else:
            raise ValueError('unrecognized value for "readwrite": {0}.\n'
                             'Allowed are "Read" and "Write" and None.'
                             .format(readwrite))
# Sorting the list of tuples is much faster than sorting it after the table
# is created. (#5262)
if rows:
# Indices represent "Data Class", "Deprecated" and "Format".
data = list(zip(*sorted(rows, key=itemgetter(0, 5, 1))))
else:
data = None
format_table = Table(data, names=('Data class', 'Format', 'Read', 'Write',
'Auto-identify', 'Deprecated'))
if not np.any(format_table['Deprecated'] == 'Yes'):
format_table.remove_column('Deprecated')
return format_table
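# Illustrative call, assuming some readers are registered for Table; the
# actual rows depend on what has been registered:
#
#     from astropy.table import Table
#     get_formats(Table, readwrite='Read')
#     # -> Table with columns 'Data class', 'Format', 'Read', 'Write',
#     #    'Auto-identify' (plus 'Deprecated' if any short names remain)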
def _update__doc__(data_class, readwrite):
"""
Update the docstring to include all the available readers / writers for the
``data_class.read`` or ``data_class.write`` functions (respectively).
"""
FORMATS_TEXT = 'The available built-in formats are:'
# Get the existing read or write method and its docstring
class_readwrite_func = getattr(data_class, readwrite)
if not isinstance(class_readwrite_func.__doc__, str):
# No docstring--could just be test code, or possibly code compiled
# without docstrings
return
lines = class_readwrite_func.__doc__.splitlines()
# Find the location of the existing formats table if it exists
sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
if sep_indices:
# Chop off the existing formats table, including the initial blank line
chop_index = sep_indices[0]
lines = lines[:chop_index]
# Find the minimum indent, skipping the first line because it might be odd
matches = [re.search(r'(\S)', line) for line in lines[1:]]
left_indent = ' ' * min(match.start() for match in matches if match)
# Get the available unified I/O formats for this class
# Include only formats that have a reader, and drop the 'Data class' column
format_table = get_formats(data_class, readwrite.capitalize())
format_table.remove_column('Data class')
# Get the available formats as a table, then munge the output of pformat()
# a bit and put it into the docstring.
new_lines = format_table.pformat(max_lines=-1, max_width=80)
table_rst_sep = re.sub('-', '=', new_lines[1])
new_lines[1] = table_rst_sep
new_lines.insert(0, table_rst_sep)
new_lines.append(table_rst_sep)
# Check for deprecated names and include a warning at the end.
if 'Deprecated' in format_table.colnames:
new_lines.extend(['',
'Deprecated format names like ``aastex`` will be '
'removed in a future version. Use the full ',
'name (e.g. ``ascii.aastex``) instead.'])
new_lines = [FORMATS_TEXT, ''] + new_lines
lines.extend([left_indent + line for line in new_lines])
# Depending on Python version and whether class_readwrite_func is
# an instancemethod or classmethod, one of the following will work.
try:
class_readwrite_func.__doc__ = '\n'.join(lines)
except AttributeError:
class_readwrite_func.__func__.__doc__ = '\n'.join(lines)
def register_reader(data_format, data_class, function, force=False):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : classobj
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
"""
    if (data_format, data_class) not in _readers or force:
_readers[(data_format, data_class)] = function
else:
raise IORegistryError("Reader for format '{0}' and class '{1}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'read')
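# A hedged end-to-end sketch (``MyData`` and ``read_mydata`` are illustrative
# placeholders only):
#
#     def read_mydata(filename):
#         ...  # parse the file and return a MyData instance
#
#     register_reader('mydata', MyData, read_mydata)
#     obj = read(MyData, 'example.dat', format='mydata')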
def unregister_reader(data_format, data_class):
"""
Unregister a reader function
Parameters
----------
data_format : str
The data format identifier.
data_class : classobj
The class of the object that the reader produces.
"""
if (data_format, data_class) in _readers:
_readers.pop((data_format, data_class))
else:
raise IORegistryError("No reader defined for format '{0}' and class '{1}'"
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'read')
def register_writer(data_format, data_class, function, force=False):
"""
Register a table writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : classobj
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
"""
    if (data_format, data_class) not in _writers or force:
_writers[(data_format, data_class)] = function
else:
raise IORegistryError("Writer for format '{0}' and class '{1}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'write')
def unregister_writer(data_format, data_class):
"""
Unregister a writer function
Parameters
----------
data_format : str
The data format identifier.
data_class : classobj
The class of the object that can be written.
"""
if (data_format, data_class) in _writers:
_writers.pop((data_format, data_class))
else:
raise IORegistryError("No writer defined for format '{0}' and class '{1}'"
''.format(data_format, data_class.__name__))
if data_class not in _delayed_docs_classes:
_update__doc__(data_class, 'write')
def register_identifier(data_format, data_class, identifier, force=False):
"""
Associate an identifier function with a specific data type.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : classobj
The class of the object that can be written.
identifier : function
A function that checks the argument specified to `read` or `write` to
determine whether the input can be interpreted as a table of type
``data_format``. This function should take the following arguments:
- ``origin``: A string ``"read"`` or ``"write"`` identifying whether
the file is to be opened for reading or writing.
- ``path``: The path to the file.
- ``fileobj``: An open file object to read the file's contents, or
`None` if the file could not be opened.
- ``*args``: Positional arguments for the `read` or `write`
function.
- ``**kwargs``: Keyword arguments for the `read` or `write`
function.
One or both of ``path`` or ``fileobj`` may be `None`. If they are
both `None`, the identifier will need to work from ``args[0]``.
The function should return True if the input can be identified
as being of format ``data_format``, and False otherwise.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
Examples
--------
To set the identifier based on extensions, for formats that take a
filename as a first argument, you can do for example::
>>> def my_identifier(*args, **kwargs):
... return isinstance(args[0], str) and args[0].endswith('.tbl')
>>> register_identifier('ipac', Table, my_identifier)
"""
    if (data_format, data_class) not in _identifiers or force:
_identifiers[(data_format, data_class)] = identifier
else:
raise IORegistryError("Identifier for format '{0}' and class '{1}' is "
'already defined'.format(data_format,
data_class.__name__))
def unregister_identifier(data_format, data_class):
"""
Unregister an identifier function
Parameters
----------
data_format : str
The data format identifier.
data_class : classobj
The class of the object that can be read/written.
"""
if (data_format, data_class) in _identifiers:
_identifiers.pop((data_format, data_class))
else:
raise IORegistryError("No identifier defined for format '{0}' and class"
" '{1}'".format(data_format, data_class.__name__))
def identify_format(origin, data_class_required, path, fileobj, args, kwargs):
"""Loop through identifiers to see which formats match.
Parameters
----------
origin : str
A string ``"read`` or ``"write"`` identifying whether the file is to be
opened for reading or writing.
data_class_required : object
The specified class for the result of `read` or the class that is to be
written.
path : str, other path object or None
The path to the file or None.
fileobj : File object or None.
An open file object to read the file's contents, or ``None`` if the
file could not be opened.
args : sequence
        Positional arguments for the `read` or `write` function. Note that
        these must be provided as a sequence.
kwargs : dict-like
Keyword arguments for the `read` or `write` function. Note that this
parameter must be `dict`-like.
Returns
-------
valid_formats : list
List of matching formats.
"""
valid_formats = []
for data_format, data_class in _identifiers:
if _is_best_match(data_class_required, data_class, _identifiers):
if _identifiers[(data_format, data_class)](
origin, path, fileobj, *args, **kwargs):
valid_formats.append(data_format)
return valid_formats
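# Sketch of how the unified machinery uses this function when no explicit
# format is given (``Table`` stands in for any registered data class):
#
#     formats = identify_format('read', Table, 'data.tbl', None,
#                               ['data.tbl'], {})
#     # -> e.g. ['ipac'] if a registered identifier matched the extension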
def _get_format_table_str(data_class, readwrite):
format_table = get_formats(data_class, readwrite=readwrite)
format_table.remove_column('Data class')
format_table_str = '\n'.join(format_table.pformat(max_lines=-1))
return format_table_str
def get_reader(data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : classobj
The class of the object that can be written.
Returns
-------
reader : callable
The registered reader function for this format and class.
"""
readers = [(fmt, cls) for fmt, cls in _readers if fmt == data_format]
for reader_format, reader_class in readers:
if _is_best_match(data_class, reader_class, readers):
return _readers[(reader_format, reader_class)]
else:
format_table_str = _get_format_table_str(data_class, 'Read')
raise IORegistryError(
"No reader defined for format '{0}' and class '{1}'.\nThe "
"available formats are:\n{2}".format(
data_format, data_class.__name__, format_table_str))
def get_writer(data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : classobj
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in _writers if fmt == data_format]
for writer_format, writer_class in writers:
if _is_best_match(data_class, writer_class, writers):
return _writers[(writer_format, writer_class)]
else:
format_table_str = _get_format_table_str(data_class, 'Write')
raise IORegistryError(
"No writer defined for format '{0}' and class '{1}'.\nThe "
"available formats are:\n{2}".format(
data_format, data_class.__name__, format_table_str))
def read(cls, *args, format=None, **kwargs):
"""
Read in data.
The arguments passed to this method depend on the format.
"""
ctx = None
try:
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
from ..utils.data import get_readable_fileobj
# path might be a pathlib.Path object
if isinstance(args[0], pathlib.Path):
args = (str(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(args[0], encoding='binary')
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = _get_valid_format(
'read', cls, path, fileobj, args, kwargs)
reader = get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
def write(data, *args, format=None, **kwargs):
"""
Write out data.
The arguments passed to this method depend on the format.
"""
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a pathlib.Path object
if isinstance(args[0], pathlib.Path):
args = (str(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = _get_valid_format(
'write', data.__class__, path, fileobj, args, kwargs)
writer = get_writer(format, data.__class__)
writer(data, *args, **kwargs)
def _is_best_match(class1, class2, format_classes):
"""
Determine if class2 is the "best" match for class1 in the list
of classes. It is assumed that (class2 in classes) is True.
    class2 is the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
            if parent in classes: # class2 was superseded
return False
return False
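# Illustration of the MRO walk above with hypothetical classes:
#
#     class Base: pass
#     class Mid(Base): pass
#     class Leaf(Mid): pass
#
#     readers = [('fmt', Base), ('fmt', Mid)]
#     _is_best_match(Leaf, Mid, readers)   # True: nearest registered ancestor
#     _is_best_match(Leaf, Base, readers)  # False: superseded by Mid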
def _get_valid_format(mode, cls, path, fileobj, args, kwargs):
"""
Returns the first valid format that can be used to read/write the data in
question. Mode can be either 'read' or 'write'.
"""
valid_formats = identify_format(mode, cls, path, fileobj, args, kwargs)
if len(valid_formats) == 0:
format_table_str = _get_format_table_str(cls, mode.capitalize())
raise IORegistryError("Format could not be identified.\n"
"The available formats are:\n"
"{0}".format(format_table_str))
elif len(valid_formats) > 1:
raise IORegistryError(
"Format is ambiguous - options are: {0}".format(
', '.join(sorted(valid_formats, key=itemgetter(0)))))
return valid_formats[0]
|
3e176f8ee63caf54896320760295498e55a46541c996876085cbfb8cd97b0d04 | """
Implements the wrapper for the Astropy test runner in the form of the
``./setup.py test`` distutils command.
"""
import os
import glob
import shutil
import subprocess
import sys
import tempfile
from setuptools import Command
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
before distutils/setuptools try to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index('--remote-data')
except ValueError:
pass
else:
sys.argv[idx] = '--remote-data=any'
try:
idx = sys.argv.index('-R')
except ValueError:
pass
else:
sys.argv[idx] = '-R=any'
return super(FixRemoteDataOption, cls).__init__(name, bases, dct)
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a .py file, '
'it is relative to the built package, so e.g., a leading "astropy/" '
'is necessary. If a relative path to a .rst file, it is relative to '
'the directory *below* the --docs-path directory, so a leading '
'"docs/" is usually necessary. May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data=', 'R', 'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open. Requires the '
'psutil package.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If negative, all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files."),
('repeat=', None,
'How many times to repeat each test (can be used to check for '
'sporadic failures).'),
('temp-root=', None,
'The root directory in which to create the temporary testing files. '
'If unspecified the system default is used (e.g. /tmp) as explained '
'in the documentation for tempfile.mkstemp.')
]
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = 'none'
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'add_local_eggs_to_path=True, ' # see _build_temp_install below
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""
Run the tests!
"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists('docs'): # pragma: no cover
self.docs_path = os.path.abspath('docs')
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug which occurs if psutil is installed in this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
        # tests_require or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists('.eggs'):
shutil.copytree('.eggs', os.path.join(self.testing_path, '.eggs'))
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
            # Construct this module's testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, '-c', cmd],
cwd=self.testing_path, close_fds=False)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package to a temporary directory for the purposes of
        testing. This allows us to test the install command, including the
        entry points, and also avoids creating pyc and __pycache__ directories
        inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because py.test is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
dir=self.temp_root)
self.tmp_dir = os.path.realpath(tmp_dir)
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command('install')
install_cmd = self.distribution.get_command_obj('install')
install_cmd.prefix = self.tmp_dir
self.run_command('install')
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command('install')
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(self.testing_path,
os.path.basename(self.docs_path))
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy('setup.cfg', self.testing_path)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated
"""
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage # pylint: disable=W0611
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path, self.package_name.replace('.', '/'),
'tests', 'coveragerc')
with open(coveragerc, 'r') as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace('.', '/'))
tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file="{0}", config_file="{1}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), tmp_coveragerc))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, "{0}", "{1}");'.format(
os.path.abspath('.'), self.testing_path))
return cmd_pre, cmd_post
|
17acf73183b0bbbf6d39cca69457c1a82e986f6eec358de6a48c8787911cb025 | """Implements the Astropy TestRunner which is a thin wrapper around py.test."""
import inspect
import os
import glob
import copy
import shlex
import sys
import tempfile
import warnings
import importlib
from collections import OrderedDict
from importlib.util import find_spec
from ..config.paths import set_temp_config, set_temp_cache
from ..utils import wraps, find_current_module
from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
def _has_test_dependencies(): # pragma: no cover
# Using the test runner will not work without these dependencies, but
# pytest-openfiles is optional, so it's not listed here.
required = ['pytest', 'pytest_remotedata', 'pytest_doctestplus']
for module in required:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
return False
return True
class keyword:
"""
A decorator to mark a method as keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
    `~astropy.tests.runner.keyword` decorator; these methods are used to
construct allowed keyword arguments to the
`~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
customization of individual keyword arguments (and associated logic)
without having to re-implement the whole
`~astropy.tests.runner.TestRunnerBase.run_tests` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
            @keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
# Filter out anything that's not got the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += ' '*8
doc_keywords += func.__doc__.strip()
doc_keywords += '\n\n'
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super(TestRunnerBase, cls).__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError("{} keyword method must return a list".format(keyword))
args += result
return args
RUN_TESTS_DOCSTRING = \
"""
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
        # sys.path when running the tests. This is done so that when
        # running python setup.py test, test dependencies installed via e.g.
        # tests_require are available here. This is not an advertised option
        # since it is only for internal use.
if kwargs.pop('add_local_eggs_to_path', False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join('.eggs', '*.egg')):
sys.path.insert(0, egg)
# We now need to force reload pkg_resources in case any pytest
# plugins were added above, so that their entry points are picked up
import pkg_resources
importlib.reload(pkg_resources)
if not _has_test_dependencies(): # pragma: no cover
msg = "Test dependencies are missing. You should install the 'pytest-astropy' package."
raise RuntimeError(msg)
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
args = self._generate_args(**kwargs)
if 'plugins' not in self.keywords or self.keywords['plugins'] is None:
self.keywords['plugins'] = []
# Make plugins available to test runner without registering them
self.keywords['plugins'].extend([
'astropy.tests.plugins.display',
'astropy.tests.plugins.config'
])
# override the config locations to not make a new directory nor use
# existing cache or config
astropy_config = tempfile.mkdtemp('astropy_config')
astropy_cache = tempfile.mkdtemp('astropy_cache')
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to py.test are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=self.keywords['plugins'])
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
`TestRunner.run_tests`.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
"""
runner = cls(path)
@wraps(runner.run_tests, ('__doc__',), exclude_args=('self',))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
# A somewhat unusual hack, but delete the attached __wrapped__
# attribute--although this is normally used to tell if the function
        # was wrapped with wraps, on some versions of Python this is also
# used to determine the signature to display in help() which is
# not useful in this case. We don't really care in this case if the
# function was wrapped either
if hasattr(test, '__wrapped__'):
del test.__wrapped__
return test
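# A minimal sketch of how a package typically wires this up in its
# ``__init__.py`` (``mypackage`` is hypothetical):
#
#     import os
#     from astropy.tests.runner import TestRunner
#     test = TestRunner.make_test_runner_in(os.path.dirname(__file__))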
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests
"""
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or 'utils'.
            If nothing is specified, all default Astropy tests are run.
"""
if package is None:
self.package_path = self.base_path
else:
self.package_path = os.path.join(self.base_path,
package.replace('.', os.path.sep))
if not os.path.isdir(self.package_path):
raise ValueError('Package not found: {0}'.format(package))
if not kwargs['test_path']:
return [self.package_path]
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs['package'], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in ('.rst', ''):
if kwargs['docs_path'] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path "
"specified.")
abs_docs_path = os.path.abspath(kwargs['docs_path'])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path))
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append('--doctest-rst')
test_path = abs_test_path
if not (os.path.isdir(test_path) or ext in ('.py', '.rst')):
raise ValueError("Test path must be a directory or a path to "
"a .py or .rst file")
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith('win'))
return []
@keyword()
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ['-v']
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ['failed', 'all']:
return ['--pastebin={0}'.format(pastebin)]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value='none')
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
"""
if remote_data is True:
remote_data = 'any'
elif remote_data is False:
remote_data = 'none'
elif remote_data not in ('none', 'astropy', 'any'):
warnings.warn("The remote_data option should be one of "
"none/astropy/any (found {0}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.".format(remote_data),
AstropyDeprecationWarning)
remote_data = 'any'
return ['--remote-data={0}'.format(remote_data)]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # pylint: disable=W0611
except ImportError:
raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
'http://pypi.python.org/pypi/pytest-pep8')
else:
return ['--pep8', '-k', 'pep8']
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ['--pdb']
return []
@keyword()
def open_files(self, open_files, kwargs):
"""
open_files : bool, optional
Fail when any tests leave files open. Off by default, because
this adds extra run time to the test suite. Requires the
``psutil`` package.
"""
if open_files:
if kwargs['parallel'] != 0:
raise SystemError(
"open file detection may not be used in conjunction with "
"parallel testing.")
try:
import psutil # pylint: disable=W0611
except ImportError:
raise SystemError(
"open file detection requested, but psutil package "
"is not installed.")
            print("Checking for unclosed files")
            return ['--open-files']
        return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int, optional
When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is negative, it will use all the
            cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package")
return ['-n', str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
if docs_path is not None and not kwargs['skip_docs']:
if kwargs['package'] is not None:
docs_path = os.path.join(
docs_path, kwargs['package'].replace('.', os.path.sep))
if not os.path.exists(docs_path):
warnings.warn(
"Can not test .rst docs, since docs path "
"({0}) does not exist.".format(docs_path))
docs_path = None
if docs_path and not kwargs['skip_docs'] and not kwargs['test_path']:
return [docs_path, '--doctest-rst']
return []
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return ['--repeat={0}'.format(repeat)]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from ..table import Table # pylint: disable=W0611
return super(TestRunner, self).run_tests(**kwargs)
|
05bcda36fabdadd3fbe9ee3037c48a10077bbf9ca269b50573032d70f72a57c0 | import matplotlib
from matplotlib import pyplot as plt
from ..utils.decorators import wraps
MPL_VERSION = matplotlib.__version__
ROOT = "http://{server}/testing/astropy/2018-03-27T18:38:34.793133/{mpl_version}/"
IMAGE_REFERENCE_DIR = (ROOT.format(server='data.astropy.org', mpl_version=MPL_VERSION[:3] + '.x') + ',' +
ROOT.format(server='www.astropy.org/astropy-data', mpl_version=MPL_VERSION[:3] + '.x'))
def ignore_matplotlibrc(func):
# This is a decorator for tests that use matplotlib but not pytest-mpl
# (which already handles rcParams)
@wraps(func)
def wrapper(*args, **kwargs):
with plt.style.context({}, after_reset=True):
return func(*args, **kwargs)
return wrapper
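# A hedged usage sketch (``test_plot`` is illustrative only):
#
#     @ignore_matplotlibrc
#     def test_plot():
#         fig, ax = plt.subplots()
#         ax.plot([0, 1], [0, 1])
#         # runs against matplotlib defaults, shielded from the user's rcParams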
|
1711f140270a6976d734742985ce3a550f236eb90de7bbb989e7cf629d87c7a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from ..jackknife import jackknife_resampling, jackknife_stats
def test_jackknife_resampling():
data = np.array([1, 2, 3, 4])
answer = np.array([[2, 3, 4], [1, 3, 4], [1, 2, 4], [1, 2, 3]])
assert_equal(answer, jackknife_resampling(data))
# test jackknife stats, except confidence interval
@pytest.mark.skipif('not HAS_SCIPY')
def test_jackknife_stats():
# Test from the third example of Ref.[3]
data = np.array((115, 170, 142, 138, 280, 470, 480, 141, 390))
# true estimate, bias, and std_err
answer = (258.4444, 0.0, 50.25936)
assert_allclose(answer, jackknife_stats(data, np.mean)[0:3], atol=1e-4)
# test jackknife stats, including confidence intervals
@pytest.mark.skipif('not HAS_SCIPY')
def test_jackknife_stats_conf_interval():
# Test from the first example of Ref.[3]
data = np.array([48, 42, 36, 33, 20, 16, 29, 39, 42, 38, 42, 36, 20, 15,
42, 33, 22, 20, 41, 43, 45, 34, 14, 22, 6, 7, 0, 15, 33,
34, 28, 29, 34, 41, 4, 13, 32, 38, 24, 25, 47, 27, 41, 41,
24, 28, 26, 14, 30, 28, 41, 40])
data = np.reshape(data, (-1, 2))
data = data[:, 1]
# true estimate, bias, and std_err
answer = (113.7862, -4.376391, 22.26572)
# calculate the mle of the variance (biased estimator!)
def mle_var(x): return np.sum((x - np.mean(x))*(x - np.mean(x)))/len(x)
assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[0:3],
atol=1e-4)
# test confidence interval
answer = np.array((70.14615, 157.42616))
assert_allclose(answer, jackknife_stats(data, mle_var, 0.95)[3], atol=1e-4)
|
d91d328ad86ca5698894cbd05d53577033ba1dee19e9e1945834cffb0574955e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.random import randn
from numpy.testing import assert_equal, assert_allclose
try:
from scipy import stats # used in testing
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from ..sigma_clipping import sigma_clip, SigmaClip, sigma_clipped_stats
from ...utils.misc import NumpyRNGContext
def test_sigma_clip():
# need to seed the numpy RNG to make sure we don't get some
# amazingly flukey random number that breaks one of the tests
with NumpyRNGContext(12345):
# Amazing, I've got the same combination on my luggage!
randvar = randn(10000)
filtered_data = sigma_clip(randvar, sigma=1, iters=2)
assert sum(filtered_data.mask) > 0
assert sum(~filtered_data.mask) < randvar.size
# this is actually a silly thing to do, because it uses the
# standard deviation as the variance, but it tests to make sure
# these arguments are actually doing something
filtered_data2 = sigma_clip(randvar, sigma=1, iters=2, stdfunc=np.var)
assert not np.all(filtered_data.mask == filtered_data2.mask)
filtered_data3 = sigma_clip(randvar, sigma=1, iters=2,
cenfunc=np.mean)
assert not np.all(filtered_data.mask == filtered_data3.mask)
# make sure the iters=None method works at all.
filtered_data = sigma_clip(randvar, sigma=3, iters=None)
# test copying
assert filtered_data.data[0] == randvar[0]
filtered_data.data[0] += 1.
assert filtered_data.data[0] != randvar[0]
filtered_data = sigma_clip(randvar, sigma=3, iters=None, copy=False)
assert filtered_data.data[0] == randvar[0]
filtered_data.data[0] += 1.
assert filtered_data.data[0] == randvar[0]
# test axis
data = np.arange(5) + np.random.normal(0., 0.05, (5, 5)) + \
np.diag(np.ones(5))
filtered_data = sigma_clip(data, axis=0, sigma=2.3)
assert filtered_data.count() == 20
filtered_data = sigma_clip(data, axis=1, sigma=2.3)
assert filtered_data.count() == 25
@pytest.mark.skipif('not HAS_SCIPY')
def test_compare_to_scipy_sigmaclip():
# need to seed the numpy RNG to make sure we don't get some
# amazingly flukey random number that breaks one of the tests
with NumpyRNGContext(12345):
randvar = randn(10000)
astropyres = sigma_clip(randvar, sigma=3, iters=None, cenfunc=np.mean)
scipyres = stats.sigmaclip(randvar, 3, 3)[0]
assert astropyres.count() == len(scipyres)
assert_equal(astropyres[~astropyres.mask].data, scipyres)
def test_sigma_clip_scalar_mask():
"""Test that the returned mask is not a scalar."""
data = np.arange(5)
result = sigma_clip(data, sigma=100., iters=1)
assert result.mask.shape != ()
def test_sigma_clip_class():
with NumpyRNGContext(12345):
data = randn(100)
data[10] = 1.e5
sobj = SigmaClip(sigma=1, iters=2)
sfunc = sigma_clip(data, sigma=1, iters=2)
assert_equal(sobj(data), sfunc)
def test_sigma_clipped_stats():
"""Test list data with input mask or mask_value (#3268)."""
# test list data with mask
data = [0, 1]
mask = np.array([True, False])
result = sigma_clipped_stats(data, mask=mask)
# Check that the result of np.ma.median was converted to a scalar
assert isinstance(result[1], float)
assert result == (1., 1., 0.)
# test list data with mask_value
result = sigma_clipped_stats(data, mask_value=0.)
assert isinstance(result[1], float)
assert result == (1., 1., 0.)
# test without mask
data = [0, 2]
result = sigma_clipped_stats(data)
assert isinstance(result[1], float)
assert result == (1., 1., 1.)
_data = np.arange(10)
data = np.ma.MaskedArray([_data, _data, 10 * _data])
mean = sigma_clip(data, axis=0, sigma=1).mean(axis=0)
assert_equal(mean, _data)
mean, median, stddev = sigma_clipped_stats(data, axis=0, sigma=1)
assert_equal(mean, _data)
assert_equal(median, _data)
assert_equal(stddev, np.zeros_like(_data))
def test_sigma_clipped_stats_ddof():
with NumpyRNGContext(12345):
data = randn(10000)
data[10] = 1.e5
mean1, median1, stddev1 = sigma_clipped_stats(data)
mean2, median2, stddev2 = sigma_clipped_stats(data, std_ddof=1)
assert mean1 == mean2
assert median1 == median2
assert_allclose(stddev1, 0.98156805711673156)
assert_allclose(stddev2, 0.98161731654802831)
def test_invalid_sigma_clip():
"""Test sigma_clip of data containing invalid values."""
data = np.ones((5, 5))
data[2, 2] = 1000
data[3, 4] = np.nan
data[1, 1] = np.inf
result = sigma_clip(data)
    # Before #4051, if the data contained any NaNs or infs, sigma_clip
    # returned a mask containing only `False`, or raised a TypeError if the
    # data also contained a masked value.
assert result.mask[2, 2]
assert result.mask[3, 4]
assert result.mask[1, 1]
def test_sigmaclip_negative_axis():
"""Test that dimensions are expanded correctly even if axis is negative."""
data = np.ones((3, 4))
# without correct expand_dims this would raise a ValueError
sigma_clip(data, axis=-1)
def test_sigmaclip_fully_masked():
"""Make sure a fully masked array is returned when sigma clipping a fully
masked array.
"""
data = np.ma.MaskedArray(data=[[1., 0.], [0., 1.]],
mask=[[True, True], [True, True]])
clipped_data = sigma_clip(data)
np.ma.allequal(data, clipped_data)
def test_sigmaclip_empty_masked():
"""Make sure a empty masked array is returned when sigma clipping an empty
masked array.
"""
data = np.ma.MaskedArray(data=[], mask=[])
clipped_data = sigma_clip(data)
np.ma.allequal(data, clipped_data)
def test_sigmaclip_empty():
"""Make sure a empty array is returned when sigma clipping an empty array.
"""
data = np.array([])
clipped_data = sigma_clip(data)
assert_equal(data, clipped_data)
def test_sigma_clip_axis_tuple_3D():
"""Test sigma clipping over a subset of axes (issue #7227).
"""
    data = np.sin(0.78 * np.arange(27)).reshape(3, 3, 3)
    mask = np.zeros_like(data, dtype=bool)
data_t = np.rollaxis(data, 1, 0)
mask_t = np.rollaxis(mask, 1, 0)
# Loop over what was originally axis 1 and clip each plane directly:
for data_plane, mask_plane in zip(data_t, mask_t):
mean = data_plane.mean()
maxdev = 1.5 * data_plane.std()
mask_plane[:] = np.logical_or(data_plane < mean - maxdev,
data_plane > mean + maxdev)
# Do the equivalent thing using sigma_clip:
result = sigma_clip(data, sigma=1.5, cenfunc=np.mean, iters=1, axis=(0,-1))
assert_equal(result.mask, mask)
|
632e662f42576d4503b77e158aa27f2f7f55d85fa8663d29dca46d842c663ec1 |
from numpy.testing import assert_allclose
from ..info_theory import bayesian_info_criterion, bayesian_info_criterion_lsq
from ..info_theory import akaike_info_criterion, akaike_info_criterion_lsq
def test_bayesian_info_criterion():
# This test is from an example presented in Ref [1]
lnL = (-176.4, -173.0)
n_params = (2, 3)
n_samples = 100
answer = 2.195
bic_g = bayesian_info_criterion(lnL[0], n_params[0], n_samples)
bic_t = bayesian_info_criterion(lnL[1], n_params[1], n_samples)
assert_allclose(answer, bic_g - bic_t, atol=1e-1)
def test_akaike_info_criterion():
# This test is from an example presented in Ref [2]
n_samples = 121
lnL = (-3.54, -4.17)
n_params = (6, 5)
answer = 0.95
aic_1 = akaike_info_criterion(lnL[0], n_params[0], n_samples)
aic_2 = akaike_info_criterion(lnL[1], n_params[1], n_samples)
assert_allclose(answer, aic_1 - aic_2, atol=1e-2)
def test_akaike_info_criterion_lsq():
# This test is from an example presented in Ref [1]
n_samples = 100
n_params = (4, 3, 3)
ssr = (25.0, 26.0, 27.0)
answer = (-130.21, -128.46, -124.68)
assert_allclose(answer[0],
akaike_info_criterion_lsq(ssr[0], n_params[0], n_samples),
atol=1e-2)
assert_allclose(answer[1],
akaike_info_criterion_lsq(ssr[1], n_params[1], n_samples),
atol=1e-2)
assert_allclose(answer[2],
akaike_info_criterion_lsq(ssr[2], n_params[2], n_samples),
atol=1e-2)
def test_bayesian_info_criterion_lsq():
"""This test is from:
http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/non_semi_models/
AkaikeLsg.pdf
Note that in there, they compute a "normalized BIC". Therefore, the
answers presented here are recalculated versions based on their values.
"""
n_samples = 25
n_params = (1, 2, 1)
ssr = (48959, 32512, 37980)
answer = (192.706, 185.706, 186.360)
assert_allclose(answer[0], bayesian_info_criterion_lsq(ssr[0],
n_params[0],
n_samples),
atol=1e-2)
assert_allclose(answer[1], bayesian_info_criterion_lsq(ssr[1],
n_params[1],
n_samples),
atol=1e-2)
assert_allclose(answer[2], bayesian_info_criterion_lsq(ssr[2],
n_params[2],
n_samples),
atol=1e-2)
|
01ab3fdbec625656050884f11df9dc306b914cf294301295c6b848b29519ec27 |
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
try:
import scipy.stats
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from ..circstats import _length, circmean, circvar, circmoment, circcorrcoef
from ..circstats import rayleightest, vtest, vonmisesmle
def test__length():
# testing against R CircStats package
# Ref. [1] pages 6 and 125
weights = np.array([12, 1, 6, 1, 2, 1, 1])
answer = 0.766282
data = np.array([0, 3.6, 36, 72, 108, 169.2, 324])*u.deg
assert_allclose(answer, _length(data, weights=weights), atol=1e-4)
def test_circmean():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
answer = 48.63*u.deg
assert_equal(answer, np.around(circmean(data), 2))
@pytest.mark.skipif('not HAS_SCIPY')
def test_circmean_against_scipy():
# testing against scipy.stats.circmean function
# the data is the same as the test before, but in radians
data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207,
6.24827872])
answer = scipy.stats.circmean(data)
assert_equal(np.around(answer, 2), np.around(circmean(data), 2))
def test_circvar():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
answer = 0.1635635
assert_allclose(answer, circvar(data), atol=1e-4)
def test_circmoment():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358])*u.deg
# 2nd, 3rd, and 4th moments
# this is the answer given in Ref[1] in radians
answer = np.array([1.588121, 1.963919, 2.685556])
answer = np.around(np.rad2deg(answer)*u.deg, 4)
result = (np.around(circmoment(data, p=2)[0], 4),
np.around(circmoment(data, p=3)[0], 4),
np.around(circmoment(data, p=4)[0], 4))
assert_equal(answer[0], result[0])
assert_equal(answer[1], result[1])
assert_equal(answer[2], result[2])
# testing lengths
answer = np.array([0.4800428, 0.236541, 0.2255761])
assert_allclose(answer, (circmoment(data, p=2)[1],
circmoment(data, p=3)[1],
circmoment(data, p=4)[1]), atol=1e-4)
def test_circcorrcoef():
# testing against R CircStats package
# Ref[1], page 180
alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324,
85, 324, 340, 157, 238, 254, 146, 232, 122, 329])*u.deg
beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, 45,
47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
answer = 0.2704648
assert_allclose(answer, circcorrcoef(alpha, beta), atol=1e-4)
def test_rayleightest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
# answer was obtained through R CircStats function r.test(x)
answer = (0.00640418, 0.9202565)
result = (rayleightest(data), _length(data))
assert_allclose(answer[0], result[0], atol=1e-4)
assert_allclose(answer[1], result[1], atol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_vtest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36])*u.deg
# answer was obtained through R CircStats function v0.test(x)
answer = 0.9994725
assert_allclose(answer, vtest(data), atol=1e-5)
def test_vonmisesmle():
# testing against R CircStats package
# testing non-Quantity
data = np.array([3.3699057, 4.0411630, 0.5014477, 2.6223103, 3.7336524,
1.8136389, 4.1566039, 2.7806317, 2.4672173,
2.8493644])
# answer was obtained through R CircStats function vm.ml(x)
answer = (3.006514, 1.474132)
assert_allclose(answer[0], vonmisesmle(data)[0], atol=1e-5)
assert_allclose(answer[1], vonmisesmle(data)[1], atol=1e-5)
# testing with Quantity
data = np.rad2deg(data)*u.deg
answer = np.rad2deg(3.006514)*u.deg
assert_equal(np.around(answer, 3), np.around(vonmisesmle(data)[0], 3))
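# (For reference: the von Mises MLE is assumed to return mu_hat = circular
#  mean and kappa_hat solving I1(kappa)/I0(kappa) = R, the mean resultant
#  length; the values checked above are consistent with that convention.)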
|
a8166f7a88c28ba6e0dae487d8130cfbcffba181b136bfeb6c240f0077318628 |
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..spatial import RipleysKEstimator
from ...utils.misc import NumpyRNGContext
a = np.array([[1, 4], [2, 5], [3, 6]])
b = np.array([[-1, 1], [-2, 2], [-3, 3]])
@pytest.mark.parametrize("points, x_min, x_max", [(a, 0, 10), (b, -5, 5)])
def test_ripley_K_implementation(points, x_min, x_max):
"""
Test against Ripley's K function implemented in R package `spatstat`
+-+---------+---------+----------+---------+-+
6 + * +
| |
| |
5.5 + +
| |
| |
5 + * +
| |
4.5 + +
| |
| |
4 + * +
+-+---------+---------+----------+---------+-+
1 1.5 2 2.5 3
+-+---------+---------+----------+---------+-+
3 + * +
| |
| |
2.5 + +
| |
| |
2 + * +
| |
1.5 + +
| |
| |
1 + * +
+-+---------+---------+----------+---------+-+
-3 -2.5 -2 -1.5 -1
"""
area = 100
r = np.linspace(0, 2.5, 5)
Kest = RipleysKEstimator(area=area, x_min=x_min, y_min=x_min, x_max=x_max,
y_max=x_max)
ANS_NONE = np.array([0, 0, 0, 66.667, 66.667])
assert_allclose(ANS_NONE, Kest(data=points, radii=r, mode='none'),
atol=1e-3)
ANS_TRANS = np.array([0, 0, 0, 82.304, 82.304])
assert_allclose(ANS_TRANS, Kest(data=points, radii=r, mode='translation'),
atol=1e-3)
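# Illustrative sketch (not part of the original test module): with
# mode='none' the estimator is assumed to be the uncorrected
#     K(r) = A / (n*(n - 1)) * sum_{i != j} 1[d_ij < r],
# which reproduces ANS_NONE above (two point pairs at distance sqrt(2)).
def _ripley_k_none_sketch(points, radii, area):
    n = len(points)
    diff = points[:, None, :] - points[None, :, :]
    d = np.sqrt((diff ** 2).sum(-1))
    off_diag = ~np.eye(n, dtype=bool)
    return np.array([area * np.count_nonzero(d[off_diag] < r) / (n * (n - 1))
                     for r in radii])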
with NumpyRNGContext(123):
a = np.random.uniform(low=5, high=10, size=(100, 2))
b = np.random.uniform(low=-10, high=-5, size=(100, 2))
@pytest.mark.parametrize("points", [a, b])
def test_ripley_uniform_property(points):
# Ripley's K function without edge-correction converges to the area when
# the number of points and the argument radii are large enough, i.e.,
# K(x) --> area as x --> inf
area = 50
Kest = RipleysKEstimator(area=area)
r = np.linspace(0, 20, 5)
assert_allclose(area, Kest(data=points, radii=r, mode='none')[4])
with NumpyRNGContext(123):
a = np.random.uniform(low=0, high=1, size=(500, 2))
b = np.random.uniform(low=-1, high=0, size=(500, 2))
@pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)])
def test_ripley_large_density(points, low, high):
Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low,
y_max=high)
r = np.linspace(0, 0.25, 25)
Kpos = Kest.poisson(r)
modes = ['ohser', 'translation', 'ripley']
for m in modes:
Kest_r = Kest(data=points, radii=r, mode=m)
assert_allclose(Kpos, Kest_r, atol=1e-1)
with NumpyRNGContext(123):
a = np.random.uniform(low=5, high=10, size=(500, 2))
b = np.random.uniform(low=-10, high=-5, size=(500, 2))
@pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)])
def test_ripley_modes(points, low, high):
Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low,
y_min=low)
r = np.linspace(0, 1.2, 25)
Kpos_mean = np.mean(Kest.poisson(r))
modes = ['ohser', 'translation', 'ripley']
for m in modes:
Kest_mean = np.mean(Kest(data=points, radii=r, mode=m))
assert_allclose(Kpos_mean, Kest_mean, atol=1e-1, rtol=1e-1)
with NumpyRNGContext(123):
a = np.random.uniform(low=0, high=1, size=(50, 2))
b = np.random.uniform(low=-1, high=0, size=(50, 2))
@pytest.mark.parametrize("points, low, high", [(a, 0, 1), (b, -1, 0)])
def test_ripley_large_density_var_width(points, low, high):
Kest = RipleysKEstimator(area=1, x_min=low, x_max=high, y_min=low,
y_max=high)
r = np.linspace(0, 0.25, 25)
Kpos = Kest.poisson(r)
Kest_r = Kest(data=points, radii=r, mode='var-width')
assert_allclose(Kpos, Kest_r, atol=1e-1)
with NumpyRNGContext(123):
a = np.random.uniform(low=5, high=10, size=(50, 2))
b = np.random.uniform(low=-10, high=-5, size=(50, 2))
@pytest.mark.parametrize("points, low, high", [(a, 5, 10), (b, -10, -5)])
def test_ripley_var_width(points, low, high):
Kest = RipleysKEstimator(area=25, x_max=high, y_max=high, x_min=low,
y_min=low)
r = np.linspace(0, 1.2, 25)
Kest_ohser = np.mean(Kest(data=points, radii=r, mode='ohser'))
Kest_var_width = np.mean(Kest(data=points, radii=r, mode='var-width'))
assert_allclose(Kest_ohser, Kest_var_width, atol=1e-1, rtol=1e-1)
|
e9dcbf41ad50c0aaf4207c2620bafa5474295e79510f28ae785323ceaf899b27 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.random import randn, normal
from numpy.testing import assert_equal, assert_allclose
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
try:
import mpmath # pylint: disable=W0611
except ImportError:
HAS_MPMATH = False
else:
HAS_MPMATH = True
from .. import funcs
from ... import units as u
from ...tests.helper import catch_warnings
from ...utils.misc import NumpyRNGContext
def test_median_absolute_deviation():
with NumpyRNGContext(12345):
# test that it runs
randvar = randn(10000)
mad = funcs.median_absolute_deviation(randvar)
# test whether an array is returned if an axis is used
randvar = randvar.reshape((10, 1000))
mad = funcs.median_absolute_deviation(randvar, axis=1)
assert len(mad) == 10
assert mad.size < randvar.size
mad = funcs.median_absolute_deviation(randvar, axis=0)
assert len(mad) == 1000
assert mad.size < randvar.size
# Test some actual values in a 3 dimensional array
x = np.arange(3 * 4 * 5)
a = np.array([sum(x[:i + 1]) for i in range(len(x))]).reshape(3, 4, 5)
mad = funcs.median_absolute_deviation(a)
assert mad == 389.5
mad = funcs.median_absolute_deviation(a, axis=0)
assert_allclose(mad, [[210., 230., 250., 270., 290.],
[310., 330., 350., 370., 390.],
[410., 430., 450., 470., 490.],
[510., 530., 550., 570., 590.]])
mad = funcs.median_absolute_deviation(a, axis=1)
assert_allclose(mad, [[27.5, 32.5, 37.5, 42.5, 47.5],
[127.5, 132.5, 137.5, 142.5, 147.5],
[227.5, 232.5, 237.5, 242.5, 247.5]])
mad = funcs.median_absolute_deviation(a, axis=2)
assert_allclose(mad, [[3., 8., 13., 18.],
[23., 28., 33., 38.],
[43., 48., 53., 58.]])
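# Illustrative sketch (not part of the original test module): the scalar
# checks above follow the unscaled MAD, median(|x - median(x)|).
def _mad_sketch(x):
    x = np.asanyarray(x)
    return np.median(np.abs(x - np.median(x)))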
def test_median_absolute_deviation_masked():
# Based on the changes introduced in #4658
# normal masked arrays without masked values are handled like normal
# numpy arrays
array = np.ma.array([1, 2, 3])
assert funcs.median_absolute_deviation(array) == 1
# masked numpy arrays return something different (rank 0 masked array)
# but one can still compare it without np.all!
array = np.ma.array([1, 4, 3], mask=[0, 1, 0])
assert funcs.median_absolute_deviation(array) == 1
# Just cross check if that's identical to the function on the unmasked
# values only
assert funcs.median_absolute_deviation(array) == (
funcs.median_absolute_deviation(array[~array.mask]))
# Multidimensional masked array
array = np.ma.array([[1, 4], [2, 2]], mask=[[1, 0], [0, 0]])
funcs.median_absolute_deviation(array)
assert funcs.median_absolute_deviation(array) == 0
# Just to compare it with the data without mask:
assert funcs.median_absolute_deviation(array.data) == 0.5
# And check if they are also broadcasted correctly
np.testing.assert_array_equal(
funcs.median_absolute_deviation(array, axis=0).data, [0, 1])
np.testing.assert_array_equal(
funcs.median_absolute_deviation(array, axis=1).data, [0, 0])
def test_median_absolute_deviation_nans():
array = np.array([[1, 4, 3, np.nan],
[2, 5, np.nan, 4]])
assert_equal(funcs.median_absolute_deviation(array, func=np.nanmedian,
axis=1), [1, 1])
array = np.ma.masked_invalid(array)
assert funcs.median_absolute_deviation(array) == 1
def test_median_absolute_deviation_multidim_axis():
array = np.ones((5, 4, 3)) * np.arange(5)[:, np.newaxis, np.newaxis]
assert_equal(funcs.median_absolute_deviation(array, axis=(1, 2)),
np.zeros(5))
assert_equal(funcs.median_absolute_deviation(
array, axis=np.array([1, 2])), np.zeros(5))
def test_median_absolute_deviation_quantity():
# Based on the changes introduced in #4658
# Just a small test that this function accepts Quantities and returns a
# quantity
a = np.array([1, 16, 5]) * u.m
mad = funcs.median_absolute_deviation(a)
# Check for the correct unit and that the result is identical to the
# result without units.
assert mad.unit == a.unit
assert mad.value == funcs.median_absolute_deviation(a.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_binom_conf_interval():
# Test Wilson and Jeffreys interval for corner cases:
# Corner cases: k = 0, k = n, conf = 0., conf = 1.
n = 5
k = [0, 4, 5]
for conf in [0., 0.5, 1.]:
res = funcs.binom_conf_interval(k, n, conf=conf, interval='wilson')
assert ((res >= 0.) & (res <= 1.)).all()
res = funcs.binom_conf_interval(k, n, conf=conf, interval='jeffreys')
assert ((res >= 0.) & (res <= 1.)).all()
# Test Jeffreys interval accuracy against table in Brown et al. (2001).
# (See `binom_conf_interval` docstring for reference.)
k = [0, 1, 2, 3, 4]
n = 7
conf = 0.95
result = funcs.binom_conf_interval(k, n, conf=conf, interval='jeffreys')
table = np.array([[0.000, 0.016, 0.065, 0.139, 0.234],
[0.292, 0.501, 0.648, 0.766, 0.861]])
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test scalar version
result = np.array([funcs.binom_conf_interval(kval, n, conf=conf,
interval='jeffreys')
for kval in k]).transpose()
assert_allclose(result, table, atol=1.e-3, rtol=0.)
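# (The Jeffreys interval here is assumed to be the equal-tailed
#  Beta(k + 1/2, n - k + 1/2) posterior interval, with the lower/upper
#  endpoint pinned to 0/1 at k = 0 and k = n, per Brown et al. 2001.)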
# Test flat
result = funcs.binom_conf_interval(k, n, conf=conf, interval='flat')
table = np.array([[0., 0.03185, 0.08523, 0.15701, 0.24486],
[0.36941, 0.52650, 0.65085, 0.75513, 0.84298]])
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test scalar version
result = np.array([funcs.binom_conf_interval(kval, n, conf=conf,
interval='flat')
for kval in k]).transpose()
assert_allclose(result, table, atol=1.e-3, rtol=0.)
# Test Wald interval
result = funcs.binom_conf_interval(0, 5, interval='wald')
assert_allclose(result, 0.) # conf interval is [0, 0] when k = 0
result = funcs.binom_conf_interval(5, 5, interval='wald')
assert_allclose(result, 1.) # conf interval is [1, 1] when k = n
result = funcs.binom_conf_interval(500, 1000, conf=0.68269,
interval='wald')
assert_allclose(result[0], 0.5 - 0.5 / np.sqrt(1000.))
assert_allclose(result[1], 0.5 + 0.5 / np.sqrt(1000.))
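# (Wald: p_hat +/- z*sqrt(p_hat*(1 - p_hat)/n); conf=0.68269 gives z=1, so
#  for k=500, n=1000 the half-width is sqrt(0.25/1000) = 0.5/sqrt(1000).)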
# Test shapes
k = 3
n = 7
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
k = np.array(k)
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
n = np.array(n)
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2,)
k = np.array([1, 3, 5])
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2, 3)
n = np.array([5, 5, 5])
for interval in ['wald', 'wilson', 'jeffreys', 'flat']:
result = funcs.binom_conf_interval(k, n, interval=interval)
assert result.shape == (2, 3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_binned_binom_proportion():
# Check that it works.
nbins = 20
x = np.linspace(0., 10., 100) # Guarantee an `x` in every bin.
success = np.ones(len(x), dtype=bool)
bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success,
bins=nbins)
# Check shape of outputs
assert bin_ctr.shape == (nbins,)
assert bin_hw.shape == (nbins,)
assert p.shape == (nbins,)
assert perr.shape == (2, nbins)
# Check that p is 1 in all bins, since success = True for all `x`.
assert (p == 1.).all()
# Check that p is 0 in all bins if success = False for all `x`.
success[:] = False
bin_ctr, bin_hw, p, perr = funcs.binned_binom_proportion(x, success,
bins=nbins)
assert (p == 0.).all()
def test_signal_to_noise_oir_ccd():
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 0, 1)
assert 5.0 == result
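# (With no sky, dark, or read noise the CCD equation reduces to
#  SNR = S*t / sqrt(S*t) = sqrt(S*t) = sqrt(25) = 5.)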
# check to make sure gain works
result = funcs.signal_to_noise_oir_ccd(1, 5, 0, 0, 0, 1, 5)
assert 5.0 == result
# now add in sky, dark current, and read noise
# make sure the snr goes down
result = funcs.signal_to_noise_oir_ccd(1, 25, 1, 0, 0, 1)
assert result < 5.0
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 1, 0, 1)
assert result < 5.0
result = funcs.signal_to_noise_oir_ccd(1, 25, 0, 0, 1, 1)
assert result < 5.0
# make sure snr increases with time
result = funcs.signal_to_noise_oir_ccd(2, 25, 0, 0, 0, 1)
assert result > 5.0
def test_bootstrap():
bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
# test general bootstrapping
answer = np.array([[7, 4, 8, 5, 7, 0, 3, 7, 8, 5],
[4, 8, 8, 3, 6, 5, 2, 8, 6, 2]])
with NumpyRNGContext(42):
assert_equal(answer, funcs.bootstrap(bootarr, 2))
# test with a bootfunction
with NumpyRNGContext(42):
bootresult = np.mean(funcs.bootstrap(bootarr, 10000, bootfunc=np.mean))
assert_allclose(np.mean(bootarr), bootresult, atol=0.01)
@pytest.mark.skipif('not HAS_SCIPY')
def test_bootstrap_multiple_outputs():
from scipy.stats import spearmanr
# test a bootfunc with several output values
# return just bootstrapping with one output from bootfunc
with NumpyRNGContext(42):
bootarr = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
[4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]).T
answer = np.array((0.19425, 0.02094))
def bootfunc(x): return spearmanr(x)[0]
bootresult = funcs.bootstrap(bootarr, 2,
bootfunc=bootfunc)
assert_allclose(answer, bootresult, atol=1e-3)
# test a bootfunc with several output values
# return just bootstrapping with the second output from bootfunc
with NumpyRNGContext(42):
bootarr = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
[4, 8, 8, 3, 6, 5, 2, 8, 6, 2]]).T
answer = np.array((0.5907,
0.9541))
def bootfunc(x): return spearmanr(x)[1]
bootresult = funcs.bootstrap(bootarr, 2,
bootfunc=bootfunc)
assert_allclose(answer, bootresult, atol=1e-3)
# return just bootstrapping with two outputs from bootfunc
with NumpyRNGContext(42):
answer = np.array(((0.1942, 0.5907),
(0.0209, 0.9541),
(0.4286, 0.2165)))
def bootfunc(x): return spearmanr(x)
bootresult = funcs.bootstrap(bootarr, 3,
bootfunc=bootfunc)
assert bootresult.shape == (3, 2)
assert_allclose(answer, bootresult, atol=1e-3)
def test_mad_std():
with NumpyRNGContext(12345):
data = normal(5, 2, size=(100, 100))
assert_allclose(funcs.mad_std(data), 2.0, rtol=0.05)
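# Illustrative sketch (not part of the original test module): mad_std is
# assumed to be K * MAD with K = 1 / Phi^{-1}(3/4) ~= 1.482602, the factor
# that makes the MAD a consistent estimator of the Gaussian sigma (cf. the
# exact constant asserted in test_mad_std_withnan below).
def _mad_std_sketch(x):
    return 1.482602218505602 * np.median(np.abs(x - np.median(x)))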
def test_mad_std_scalar_return():
with NumpyRNGContext(12345):
data = normal(5, 2, size=(10, 10))
# make a masked array with no masked points
data = np.ma.masked_where(np.isnan(data), data)
rslt = funcs.mad_std(data)
# want a scalar result, NOT a masked array
assert np.isscalar(rslt)
data[5, 5] = np.nan
rslt = funcs.mad_std(data, ignore_nan=True)
assert np.isscalar(rslt)
with catch_warnings():
rslt = funcs.mad_std(data)
assert np.isscalar(rslt)
try:
assert not np.isnan(rslt)
# This might not be an issue anymore when only numpy>=1.13 is
# supported. NUMPY_LT_1_13 xref #7267
except AssertionError:
pytest.xfail('See #5232')
def test_mad_std_warns():
with NumpyRNGContext(12345):
data = normal(5, 2, size=(10, 10))
data[5, 5] = np.nan
with catch_warnings() as warns:
rslt = funcs.mad_std(data, ignore_nan=False)
assert np.isnan(rslt)
def test_mad_std_withnan():
with NumpyRNGContext(12345):
data = np.empty([102, 102])
data[:] = np.nan
data[1:-1, 1:-1] = normal(5, 2, size=(100, 100))
assert_allclose(funcs.mad_std(data, ignore_nan=True), 2.0, rtol=0.05)
assert np.isnan(funcs.mad_std([1, 2, 3, 4, 5, np.nan]))
assert_allclose(funcs.mad_std([1, 2, 3, 4, 5, np.nan], ignore_nan=True),
1.482602218505602)
def test_mad_std_with_axis():
data = np.array([[1, 2, 3, 4],
[4, 3, 2, 1]])
# results follow data symmetry
result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111,
2.22390333])
result_axis1 = np.array([1.48260222, 1.48260222])
assert_allclose(funcs.mad_std(data, axis=0), result_axis0)
assert_allclose(funcs.mad_std(data, axis=1), result_axis1)
def test_mad_std_with_axis_and_nan():
data = np.array([[1, 2, 3, 4, np.nan],
[4, 3, 2, 1, np.nan]])
# results follow data symmetry
result_axis0 = np.array([2.22390333, 0.74130111, 0.74130111,
2.22390333, np.nan])
result_axis1 = np.array([1.48260222, 1.48260222])
assert_allclose(funcs.mad_std(data, axis=0, ignore_nan=True), result_axis0)
assert_allclose(funcs.mad_std(data, axis=1, ignore_nan=True), result_axis1)
def test_mad_std_with_axis_and_nan_array_type():
# mad_std should return a masked array if given one, and not otherwise
data = np.array([[1, 2, 3, 4, np.nan],
[4, 3, 2, 1, np.nan]])
result = funcs.mad_std(data, axis=0, ignore_nan=True)
assert not np.ma.isMaskedArray(result)
data = np.ma.masked_where(np.isnan(data), data)
result = funcs.mad_std(data, axis=0, ignore_nan=True)
assert np.ma.isMaskedArray(result)
def test_gaussian_fwhm_to_sigma():
fwhm = (2.0 * np.sqrt(2.0 * np.log(2.0)))
assert_allclose(funcs.gaussian_fwhm_to_sigma * fwhm, 1.0, rtol=1.0e-6)
def test_gaussian_sigma_to_fwhm():
sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))
assert_allclose(funcs.gaussian_sigma_to_fwhm * sigma, 1.0, rtol=1.0e-6)
def test_gaussian_sigma_to_fwhm_to_sigma():
assert_allclose(funcs.gaussian_fwhm_to_sigma *
funcs.gaussian_sigma_to_fwhm, 1.0)
def test_poisson_conf_interval_rootn():
assert_allclose(funcs.poisson_conf_interval(16, interval='root-n'),
(12, 20))
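# ('root-n' is the simple n +/- sqrt(n) interval: 16 +/- 4 -> (12, 20).)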
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('interval', ['root-n-0',
'pearson',
'sherpagehrels',
'frequentist-confidence'])
def test_poisson_conf_large(interval):
n = 100
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n'),
funcs.poisson_conf_interval(n, interval=interval),
rtol=2e-2)
def test_poisson_conf_array_rootn0_zero():
n = np.zeros((3, 4, 5))
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='root-n-0')[:, None, None, None] * np.ones_like(n))
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_array_frequentist_confidence_zero():
n = np.zeros((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval='frequentist-confidence'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='frequentist-confidence')[:, None, None, None] * np.ones_like(n))
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
def test_poisson_conf_list_rootn0_zero():
n = [0, 0, 0]
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
[[0, 0, 0], [1, 1, 1]])
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
def test_poisson_conf_array_rootn0():
n = 7 * np.ones((3, 4, 5))
assert_allclose(funcs.poisson_conf_interval(n, interval='root-n-0'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='root-n-0')[:, None, None, None] * np.ones_like(n))
n[1, 2, 3] = 0
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='root-n-0')))
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_array_fc():
n = 7 * np.ones((3, 4, 5))
assert_allclose(
funcs.poisson_conf_interval(n, interval='frequentist-confidence'),
funcs.poisson_conf_interval(n[0, 0, 0], interval='frequentist-confidence')[:, None, None, None] * np.ones_like(n))
n[1, 2, 3] = 0
assert not np.any(np.isnan(
funcs.poisson_conf_interval(n, interval='frequentist-confidence')))
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array([(0, 0, 1.841),
(1, 0.173, 3.300),
(2, 0.708, 4.638),
(3, 1.367, 5.918),
(4, 2.086, 7.163),
(5, 2.840, 8.382),
(6, 3.620, 9.584),
(7, 4.419, 10.77),
(8, 5.232, 11.95),
(9, 6.057, 13.11),
(10, 6.891, 14.27),
])
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0],
interval='frequentist-confidence'),
nlh[:, 1:].T, rtol=0.001, atol=0.001)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_2sigma():
"""Test intervals against those published in Gehrels 1986
Note: I think there's a typo (transposition of digits) in Gehrels 1986,
specifically for the two-sigma lower limit for 3 events; they claim
0.569 but this function returns 0.59623...
"""
nlh = np.array([(0, 2, 0, 3.783),
(1, 2, 2.30e-2, 5.683),
(2, 2, 0.230, 7.348),
(3, 2, 0.596, 8.902),
(4, 2, 1.058, 10.39),
(5, 2, 1.583, 11.82),
(6, 2, 2.153, 13.22),
(7, 2, 2.758, 14.59),
(8, 2, 3.391, 15.94),
(9, 2, 4.046, 17.27),
(10, 2, 4.719, 18.58)])
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0], sigma=2,
interval='frequentist-confidence').T,
nlh[:, 2:], rtol=0.01)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_frequentist_confidence_gehrels_3sigma():
"""Test intervals against those published in Gehrels 1986"""
nlh = np.array([(0, 3, 0, 6.608),
(1, 3, 1.35e-3, 8.900),
(2, 3, 5.29e-2, 10.87),
(3, 3, 0.212, 12.68),
(4, 3, 0.465, 14.39),
(5, 3, 0.792, 16.03),
(6, 3, 1.175, 17.62),
(7, 3, 1.603, 19.17),
(8, 3, 2.068, 20.69),
(9, 3, 2.563, 22.18),
(10, 3, 3.084, 23.64),
])
assert_allclose(
funcs.poisson_conf_interval(nlh[:, 0], sigma=3,
interval='frequentist-confidence').T,
nlh[:, 2:], rtol=0.01, verbose=True)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('n', [0, 1, 2, 3, 10, 20, 100])
def test_poisson_conf_gehrels86(n):
assert_allclose(
funcs.poisson_conf_interval(n, interval='sherpagehrels')[1],
funcs.poisson_conf_interval(n, interval='frequentist-confidence')[1],
rtol=0.02)
@pytest.mark.skipif('not HAS_SCIPY')
def test_scipy_poisson_limit():
'''Test that the lower-level routine gives the same number.
Test numbers are from Tables 1 and 3 in
Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_
'''
assert_allclose(funcs._scipy_kraft_burrows_nousek(5., 2.5, .99),
(0, 10.67), rtol=1e-3)
conf = funcs.poisson_conf_interval([5., 6.], 'kraft-burrows-nousek',
background=[2.5, 2.],
conflevel=[.99, .9])
assert_allclose(conf[:, 0], (0, 10.67), rtol=1e-3)
assert_allclose(conf[:, 1], (0.81, 8.99), rtol=5e-3)
@pytest.mark.skipif('not HAS_MPMATH')
def test_mpmath_poisson_limit():
assert_allclose(funcs._mpmath_kraft_burrows_nousek(6., 2., .9),
(0.81, 8.99), rtol=5e-3)
assert_allclose(funcs._mpmath_kraft_burrows_nousek(5., 2.5, .99),
(0, 10.67), rtol=1e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_value_errors():
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval([5, 6], 'root-n', sigma=2)
assert 'Only sigma=1 supported' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval([5, 6], 'pearson', background=[2.5, 2.])
assert 'background not supported' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval([5, 6], 'sherpagehrels',
conflevel=[2.5, 2.])
assert 'conflevel not supported' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(1, 'foo')
assert 'Invalid method' in str(e.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_poisson_conf_kbn_value_errors():
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
background=2.5,
conflevel=99)
assert 'number between 0 and 1' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
background=2.5)
assert 'Set conflevel for method' in str(e.value)
with pytest.raises(ValueError) as e:
funcs.poisson_conf_interval(5., 'kraft-burrows-nousek',
background=-2.5,
conflevel=.99)
assert 'Background must be' in str(e.value)
@pytest.mark.skipif('HAS_SCIPY or HAS_MPMATH')
def test_poisson_limit_nodependencies():
with pytest.raises(ImportError):
funcs.poisson_conf_interval(20., interval='kraft-burrows-nousek',
background=10., conflevel=.95)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N', [10, 100, 1000, 10000])
def test_uniform(N):
with NumpyRNGContext(12345):
assert funcs.kuiper(np.random.random(N))[1] > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
(20, 100),
(100, 20),
(10, 20),
(5, 5),
(1000, 100)])
def test_kuiper_two_uniform(N, M):
with NumpyRNGContext(12345):
assert funcs.kuiper_two(np.random.random(N),
np.random.random(M))[1] > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
(20, 100),
(100, 20),
(10, 20),
(5, 5),
(1000, 100)])
def test_kuiper_two_nonuniform(N, M):
with NumpyRNGContext(12345):
assert funcs.kuiper_two(np.random.random(N)**2,
np.random.random(M)**2)[1] > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
def test_detect_kuiper_two_different():
with NumpyRNGContext(12345):
D, f = funcs.kuiper_two(np.random.random(500) * 0.5,
np.random.random(500))
assert f < 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,M', [(100, 100),
(20, 100),
(100, 20),
(10, 20),
(5, 5),
(1000, 100)])
def test_fpp_kuiper_two(N, M):
with NumpyRNGContext(12345):
R = 100
fpp = 0.05
fps = 0
for i in range(R):
D, f = funcs.kuiper_two(np.random.random(N), np.random.random(M))
if f < fpp:
fps += 1
assert scipy.stats.binom(R, fpp).sf(fps - 1) > 0.005
assert scipy.stats.binom(R, fpp).cdf(fps - 1) > 0.005
@pytest.mark.skipif('not HAS_SCIPY')
def test_histogram():
with NumpyRNGContext(1234):
a, b = 0.3, 3.14
s = np.random.uniform(a, b, 10000) % 1
b, w = funcs.fold_intervals([(a, b, 1. / (b - a))])
h = funcs.histogram_intervals(16, b, w)
nn, bb = np.histogram(s, bins=len(h), range=(0, 1))
uu = np.sqrt(nn)
nn, uu = len(h) * nn / h / len(s), len(h) * uu / h / len(s)
c2 = np.sum(((nn - 1) / uu)**2)
assert scipy.stats.chi2(len(h)).cdf(c2) > 0.01
assert scipy.stats.chi2(len(h)).sf(c2) > 0.01
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize("ii,rr", [
((4, (0, 1), (1,)), (1, 1, 1, 1)),
((2, (0, 1), (1,)), (1, 1)),
((4, (0, 0.5, 1), (1, 1)), (1, 1, 1, 1)),
((4, (0, 0.5, 1), (1, 2)), (1, 1, 2, 2)),
((3, (0, 0.5, 1), (1, 2)), (1, 1.5, 2)),
])
def test_histogram_intervals_known(ii, rr):
with NumpyRNGContext(1234):
assert_allclose(funcs.histogram_intervals(*ii), rr)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('N,m,p', [(100, 10000, 0.01),
(300, 10000, 0.001),
(10, 10000, 0.001),
])
def test_uniform_binomial(N, m, p):
"""Check that the false positive probability is right
In particular, run m trials with N uniformly-distributed photons
and check that the number of false positives is consistent with
a binomial distribution. The more trials, the tighter the bounds
but the longer the runtime.
"""
with NumpyRNGContext(1234):
fpps = [funcs.kuiper(np.random.random(N))[1]
for i in range(m)]
assert (scipy.stats.binom(n=m, p=p).ppf(0.01) <
len([fpp for fpp in fpps if fpp < p]) <
scipy.stats.binom(n=m, p=p).ppf(0.99))
|
f6a557eb5f8af42804d959151bc4fb5c0c1b944cd295e2b68e54a7d5427c1e42 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.random import randn, normal
from numpy.testing import assert_equal, assert_allclose
from ..biweight import (biweight_location, biweight_scale,
biweight_midvariance, biweight_midcovariance,
biweight_midcorrelation)
from ...tests.helper import catch_warnings
from ...utils.misc import NumpyRNGContext
def test_biweight_location():
with NumpyRNGContext(12345):
# test that it runs
randvar = randn(10000)
cbl = biweight_location(randvar)
assert abs(cbl - 0) < 1e-2
def test_biweight_location_small():
cbl = biweight_location([1, 3, 5, 500, 2])
assert abs(cbl - 2.745) < 1e-3
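# Illustrative sketch (not part of the original test module): biweight_location
# is assumed to follow the standard Tukey biweight form with c = 6,
#     M + sum((x - M) * w) / sum(w),  w = (1 - u**2)**2 for |u| < 1,
#     u = (x - M) / (c * MAD),
# which reproduces 2.745... for the small sample above (the outlier 500 gets
# zero weight).
def _biweight_location_sketch(x, c=6.0):
    x = np.asanyarray(x, dtype=float)
    M = np.median(x)
    u = (x - M) / (c * np.median(np.abs(x - M)))
    w = (1 - u ** 2) ** 2 * (np.abs(u) < 1)
    return M + np.sum((x - M) * w) / np.sum(w)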
def test_biweight_location_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = normal(5, 2, (ny, nx))
bw = biweight_location(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_location(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_location(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_location_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = normal(5, 2, (nz, ny, nx))
bw = biweight_location(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_scale():
# NOTE: biweight_scale is covered by biweight_midvariance tests
data = [1, 3, 5, 500, 2]
scl = biweight_scale(data)
var = biweight_midvariance(data)
assert_allclose(scl, np.sqrt(var))
def test_biweight_midvariance():
with NumpyRNGContext(12345):
# test that it runs
randvar = randn(10000)
var = biweight_midvariance(randvar)
assert_allclose(var, 1.0, rtol=0.02)
def test_biweight_midvariance_small():
data = [1, 3, 5, 500, 2]
var = biweight_midvariance(data)
assert_allclose(var, 2.9238456) # verified with R
var = biweight_midvariance(data, modify_sample_size=True)
assert_allclose(var, 2.3390765)
def test_biweight_midvariance_5127():
# test a regression introduced in #5127
rand = np.random.RandomState(12345)
data = rand.normal(loc=0., scale=20., size=(100, 100))
var = biweight_midvariance(data)
assert_allclose(var, 406.86938710817344) # verified with R
def test_biweight_midvariance_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = normal(5, 2, (ny, nx))
bw = biweight_midvariance(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_midvariance(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_midvariance(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_midvariance_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = normal(5, 2, (nz, ny, nx))
bw = biweight_midvariance(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_midcovariance_1d():
d = [0, 1, 2]
cov = biweight_midcovariance(d)
var = biweight_midvariance(d)
assert_allclose(cov, [[var]])
def test_biweight_midcovariance_2d():
d = [[0, 1, 2], [2, 1, 0]]
cov = biweight_midcovariance(d)
val = 0.70121809
assert_allclose(cov, [[val, -val], [-val, val]]) # verified with R
d = [[5, 1, 10], [500, 5, 2]]
cov = biweight_midcovariance(d)
assert_allclose(cov, [[14.54159077, -7.79026256], # verified with R
[-7.79026256, 6.92087252]])
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_allclose(cov, [[14.54159077, -5.19350838],
[-5.19350838, 4.61391501]])
def test_biweight_midcovariance_midvariance():
"""
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance.
"""
rng = np.random.RandomState(1)
d = rng.normal(0, 2, size=(100, 3))
cov = biweight_midcovariance(d)
var = [biweight_midvariance(a) for a in d]
assert_allclose(cov.diagonal(), var)
cov2 = biweight_midcovariance(d, modify_sample_size=True)
var2 = [biweight_midvariance(a, modify_sample_size=True)
for a in d]
assert_allclose(cov2.diagonal(), var2)
def test_midcovariance_shape():
"""
Test that biweight_midcovariance raises error with a 3D array.
"""
d = np.ones(27).reshape(3, 3, 3)
with pytest.raises(ValueError) as e:
biweight_midcovariance(d)
assert 'The input array must be 2D or 1D.' in str(e.value)
def test_midcovariance_M_shape():
"""
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array.
"""
d = [0, 1, 2]
M = [[0, 1], [2, 3]]
with pytest.raises(ValueError) as e:
biweight_midcovariance(d, M=M)
assert 'M must be a scalar or 1D array.' in str(e.value)
def test_biweight_midcovariance_symmetric():
"""
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972).
"""
rng = np.random.RandomState(1)
d = rng.gamma(2, 2, size=(3, 500))
cov = biweight_midcovariance(d)
assert_equal(cov, cov.T)
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_equal(cov, cov.T)
def test_biweight_midcorrelation():
x = [0, 1, 2]
y = [2, 1, 0]
assert_allclose(biweight_midcorrelation(x, x), 1.0)
assert_allclose(biweight_midcorrelation(x, y), -1.0)
x = [5, 1, 10, 12.4, 13.2]
y = [500, 5, 2, 7.1, 0.9]
# verified with R
assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313)
def test_biweight_midcorrelation_inputs():
a1 = np.ones((3, 3))
a2 = np.ones(5)
a3 = np.ones(7)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a1, a2)
assert 'x must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a1)
assert 'y must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a3)
assert 'x and y must have the same shape.' in str(e.value)
def test_biweight_32bit_runtime_warnings():
"""Regression test for #6905."""
with NumpyRNGContext(12345):
data = np.random.random(100).astype(np.float32)
data[50] = 30000.
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_scale(data)
assert len(warning_lines) == 0
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_midvariance(data)
assert len(warning_lines) == 0
|
b134d1f36a745e20d33e7d93462c358f206a408e9b7d7deb2398861fc3cd8658 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ..convolve import convolve_fft
VALID_DTYPES = []
for dtype_array in ['>f4', '<f4', '>f8', '<f8']:
for dtype_kernel in ['>f4', '<f4', '>f8', '<f8']:
VALID_DTYPES.append((dtype_array, dtype_kernel))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap']
NANTREATMENT_OPTIONS = ('interpolate', 'fill')
"""
What does convolution mean? We use the 'same size' assumption here (i.e.,
you expect an array of the exact same size as the one you put in)
Convolving any array with a kernel that is [1] should result in the same array returned
Working example array: [1, 2, 3, 4, 5]
Convolved with [1] = [1, 2, 3, 4, 5]
Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT!
Convolved with [1, 0] = [1, 2, 3, 4, 5]
Convolved with [0, 1] = [0, 1, 2, 3, 4]
"""
# NOTE: use_numpy_fft is redundant if you don't have FFTW installed
option_names = ('boundary', 'nan_treatment', 'normalize_kernel')
options = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
))
option_names_preserve_nan = ('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan')
options_preserve_nan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False)))
def assert_floatclose(x, y):
"""Assert arrays are close to within expected floating point rounding.
Check that the result is correct at the precision expected for 64 bit
numbers, taking into account that the tolerance has to reflect that all
powers in the FFTs enter our values.
"""
# The number used is set by the fact that the Windows FFT sometimes
# returns an answer that is EXACTLY 10*np.spacing.
assert_allclose(x, y, atol=10*np.spacing(x.max()), rtol=0.)
class TestConvolve1D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_key = (boundary, nan_treatment, normalize_kernel)
answer_dict = {
'sum_fill_zeros': np.array([1., 4., 3.], dtype='float64'),
'average_fill_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
}
result_dict = {
# boundary, nan_treatment, normalize_kernel
('fill', 'interpolate', True): answer_dict['average_fill_zeros'],
('wrap', 'interpolate', True): answer_dict['average_wrap'],
('fill', 'interpolate', False): answer_dict['sum_fill_zeros'],
('wrap', 'interpolate', False): answer_dict['sum_wrap'],
}
for k in list(result_dict.keys()):
result_dict[(k[0], 'fill', k[2])] = result_dict[k]
for k in list(result_dict.keys()):
if k[0] == 'fill':
result_dict[(None, k[1], k[2])] = result_dict[k]
assert_floatclose(z, result_dict[answer_key])
@pytest.mark.parametrize(option_names, options)
def test_halfity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform, non-unity kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([0.5, 0.5, 0.5], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_dict = {
'sum': np.array([0.5, 2.0, 1.5], dtype='float64'),
'sum_zeros': np.array([0.5, 2., 1.5], dtype='float64'),
'sum_nozeros': np.array([0.5, 2., 1.5], dtype='float64'),
'average': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([2., 2., 2.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'average_nozeros': np.array([0.5, 4 / 3., 1.5], dtype='float64'),
}
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
assert_floatclose(z, answer_dict[answer_key])
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, [1., 0., 3.])
inputs = (np.array([1., np.nan, 3.], dtype='float64'),
np.array([1., np.inf, 3.], dtype='float64'))
outputs = (np.array([1., 0., 3.], dtype='float64'),
np.array([1., 0., 3.], dtype='float64'))
options_unity1withnan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False),
inputs, outputs))
@pytest.mark.parametrize(option_names_preserve_nan + ('inval', 'outval'),
options_unity1withnan)
def test_unity_1_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan, inval, outval):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = inval
y = np.array([1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, outval)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
answer_dict = {
'sum': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros': np.array([1., 4., 3.], dtype='float64'),
'sum_zeros': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros_interpnan': np.array([1., 4., 3.], dtype='float64'),
'average': np.array([1., 2., 3.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4/3., 4/3., 4/3.], dtype='float64'),
'average_wrap_interpnan': np.array([2, 2, 2], dtype='float64'),
'average_nozeros': np.array([1/2., 4/3., 3/2.], dtype='float64'),
'average_nozeros_interpnan': np.array([1., 2., 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 3 / 3.], dtype='float64'),
'average_zeros_interpnan': np.array([1 / 2., 4 / 2., 3 / 2.], dtype='float64'),
}
for key in list(answer_dict.keys()):
if 'sum' in key:
answer_dict[key+"_interpnan"] = answer_dict[key] * 3./2.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
posns = np.where(np.isfinite(z))
assert_floatclose(z[posns], answer_dict[answer_key][posns])
def test_nan_fill(self):
# Test fill_value=np.nan with a masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
def test_masked_array(self):
"""
Check whether convolve_fft works with masked arrays.
"""
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
# Test masked kernel
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
def test_normalize_function(self):
"""
Check if convolve_fft works when passing a normalize function.
"""
array = [1, 2, 3]
kernel = [3, 3, 3]
result = convolve_fft(array, kernel, normalize_kernel=np.max)
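# [1, 2, 3] convolved with [3, 3, 3]/max(kernel) = [1, 1, 1], with
# zero-filled edges, gives [0+1+2, 1+2+3, 2+3+0] = [3, 6, 5].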
assert_floatclose(result, [3, 6, 5])
@pytest.mark.parametrize(option_names, options)
def test_normalization_is_respected(self, boundary,
nan_treatment,
normalize_kernel):
"""
Check that if normalize_kernel is False then the normalization
tolerance is respected.
"""
array = np.array([1, 2, 3])
# A simple identity kernel to which a non-zero normalization is added.
base_kernel = np.array([1.0])
# Use the same normalization error tolerance in all cases.
normalization_rtol = 1e-4
# Add the error below to the kernel.
norm_error = [normalization_rtol / 10, normalization_rtol * 10]
for err in norm_error:
kernel = base_kernel + err
result = convolve_fft(array, kernel,
normalize_kernel=normalize_kernel,
nan_treatment=nan_treatment,
normalization_zero_tol=normalization_rtol)
if normalize_kernel:
# Kernel has been normalized to 1.
assert_floatclose(result, array)
else:
# Kernel should not have been normalized...
assert_floatclose(result, array * kernel)
class TestConvolve2D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[1.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel)
w = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['sum_withzeros'] = answer_dict['sum']
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
a = answer_dict[answer_key]
assert_floatclose(z, a)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
z = np.nan_to_num(z)
x = np.nan_to_num(x)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 3.],
[1., np.nan, 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
# commented out: allow unnormalized nan-ignoring convolution
# # kernel is not normalized, so this situation -> exception
# if nan_treatment and not normalize_kernel:
# with pytest.raises(ValueError):
# z = convolve_fft(x, y, boundary=boundary,
# nan_treatment=nan_treatment,
# normalize_kernel=normalize_kernel,
# ignore_edge_zeros=ignore_edge_zeros,
# )
# return
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
# weights
w_n = np.array([[3., 5., 3.],
[5., 8., 5.],
[3., 5., 3.]], dtype='float64')
w_z = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w_z
answer_dict['average_interpnan'] = answer_dict['sum'] / w_n
answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8.
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8.
answer_dict['sum_withzeros'] = answer_dict['sum']
answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8.
answer_dict['sum_withzeros_interpnan'] = answer_dict['sum']
answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
a = answer_dict[answer_key]
# Skip the NaN at [1, 1] when preserve_nan=True
posns = np.where(np.isfinite(z))
# for reasons unknown, the Windows FFT returns an answer for the [0, 0]
# component that is EXACTLY 10*np.spacing
assert_floatclose(z[posns], a[posns])
def test_big_fail(self):
""" Test that convolve_fft raises an exception if a too-large array is passed in """
with pytest.raises((ValueError, MemoryError)):
# while a good idea, this approach did not work; it actually writes to disk
# arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex)
# this just allocates the memory but never touches it; it's better:
arr = np.empty([512, 512, 512], dtype=complex)
# note 512**3 * 16 bytes = 2.0 GB
convolve_fft(arr, arr)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary in (None, 'fill'):
assert_floatclose(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'))
elif boundary == 'wrap':
assert_floatclose(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'))
else:
raise ValueError("Invalid boundary specification")
|
7b18ab2662056ac05374cf183971be4b250c734dbda9b28663dfb80d25884b7d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""
import os
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from .. import projections
from ..parameters import InputParameterError
from ... import units as u
from ...io import fits
from ... import wcs
from ...utils.data import get_pkg_data_filename
from ...tests.helper import assert_quantity_allclose
def test_Projection_properties():
projection = projections.Sky2Pix_PlateCarree()
assert projection.n_inputs == 2
assert projection.n_outputs == 2
PIX_COORDINATES = [-10, 30]
pars = [(x,) for x in projections.projcodes]
# There is no groundtruth file for the XPH projection available here:
# http://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(('XPH',))
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'] * u.deg, wcslibout['theta'] * u.deg)
assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg)
assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg)
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(code))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = 'PV2_{0}'.format(i + 1)
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES * u.deg)
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
@pytest.mark.parametrize(('code',), pars)
def test_projection_default(code):
"""Check astropy model eval with default parameters"""
# Just makes sure that the default parameter values are reasonable
# and accepted by wcslib.
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model()
x, y = tinv(45, 45)
model = getattr(projections, 'Pix2Sky_' + code)
tinv = model()
x, y = tinv(0, 0)
class TestZenithalPerspective:
"""Test Zenithal Perspective projection"""
def setup_class(self):
ID = 'AZP'
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(ID))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw)
def test_AZP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_AZP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
class TestCylindricalPerspective:
"""Test cylindrical perspective projection"""
def setup_class(self):
ID = "CYP"
wcs_map = os.path.join(os.pardir, os.pardir, "wcs", "tests", "maps",
"1904-66_{0}.hdr".format(ID))
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw)
def test_CYP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_CYP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_AffineTransformation2D():
# Simple test with a scale and translation
model = projections.AffineTransformation2D(
matrix=[[2, 0], [0, 2]], translation=[1, 1])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
new_rect = np.vstack(model(x, y)).T
assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]])
def test_AffineTransformation2D_inverse():
# Test non-invertible model
model1 = projections.AffineTransformation2D(
matrix=[[1, 1], [1, 1]])
with pytest.raises(InputParameterError):
model1.inverse
model2 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
x_new, y_new = model2.inverse(*model2(x, y))
assert_allclose([x, y], [x_new, y_new], atol=1e-10)
def test_c_projection_striding():
# This is just a simple test to make sure that the striding is
# handled correctly in the projection C extension
coords = np.arange(10).reshape((5, 2))
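    # Column views of this C-ordered array are non-contiguous, so passing them
    # below exercises the strided-input path of the C extension.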
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
phi, theta = model(coords[:, 0], coords[:, 1])
assert_almost_equal(
phi,
[0., 2.2790416, 4.4889294, 6.6250643, 8.68301])
assert_almost_equal(
theta,
[-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629])
def test_c_projections_shaped():
nx, ny = (5, 2)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(x, y)
model = projections.Pix2Sky_TAN()
phi, theta = model(xv, yv)
assert_allclose(
phi,
[[0., 90., 90., 90., 90.],
[180., 165.96375653, 153.43494882, 143.13010235, 135.]])
assert_allclose(
theta,
[[90., 89.75000159, 89.50001269, 89.25004283, 89.00010152],
[89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353]])
def test_affine_with_quantities():
x = 1
y = 2
xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
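    # u.pixel_scale supplies the pix <-> angle equivalency (2.5 deg per pixel here).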
xpix = x * u.pix
ypix = y * u.pix
# test affine with matrix only
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg)
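    # Evaluation is expected to raise, presumably because the default
    # translation is dimensionless while the matrix carries degrees.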
with pytest.raises(ValueError):
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(2.5 * u.deg / u.pix),
'y': u.pixel_scale(2.5 * u.deg / u.pix)})
# test affine with matrix and translation
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg,
translation=[1, 2] * u.deg)
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(2.5 * u.deg / u.pix),
'y': u.pixel_scale(2.5 * u.deg / u.pix)})
aff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]], translation=[1, 2])
x1, y1 = aff(xdeg.value, ydeg.value)
assert_quantity_allclose(qx1, x1 * u.deg)
assert_quantity_allclose(qy1, y1 * u.deg)
# test the case of WCS PC and CDELT transformations
pc = np.array([[0.86585778922708, 0.50029020461607],
[-0.50029020461607, 0.86585778922708]])
cdelt = np.array([[1, 3.0683055555556E-05], [3.0966944444444E-05, 1]])
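    # Elementwise product (not a matrix multiplication): each PC element is
    # scaled by its corresponding CDELT factor.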
matrix = cdelt * pc
qaff = projections.AffineTransformation2D(matrix=matrix * u.deg,
translation=[0, 0] * u.deg)
inv_matrix = np.linalg.inv(matrix)
inv_qaff = projections.AffineTransformation2D(matrix=inv_matrix * u.pix,
translation=[0, 0] * u.pix)
qaff.inverse = inv_qaff
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(1 * u.deg / u.pix),
'y': u.pixel_scale(1 * u.deg / u.pix)})
x1, y1 = qaff.inverse(qx1, qy1, equivalencies={
'x': u.pixel_scale(1 * u.deg / u.pix),
'y': u.pixel_scale(1 * u.deg / u.pix)})
assert_quantity_allclose(x1, xpix)
assert_quantity_allclose(y1, ypix)
|
604c3dbb18bf3fb3c7e6273a500cc6be28e2c74dd13e66245a5c01c3a9eafe60 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
import os.path
import pytest
import numpy as np
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal
from unittest import mock
from . import irafutil
from .. import models
from ..core import Fittable2DModel, Parameter
from ..fitting import *
from ...utils import NumpyRNGContext
from ...utils.data import get_pkg_data_filename
from .utils import ignore_non_integer_warning
from ...stats import sigma_clip
from ...utils.exceptions import AstropyUserWarning
from ..fitting import populate_entry_points
import warnings
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
from pkg_resources import EntryPoint
HAS_PKG = True
except ImportError:
HAS_PKG = False
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomail fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
self.fitter = LinearLSQFitter()
def test_poly2D_fitting(self):
v = self.model.fit_deriv(x=self.x, y=self.y)
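        # Solve for the coefficients directly with linear least squares;
        # this serves as the reference solution for the fitter.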
p = linalg.lstsq(v, self.z.flatten())[0]
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_polynomial2D_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
nlfitter = LevMarLSQFitter()
new_model = nlfitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D:
"""
    Tests 2D Chebyshev polynomial fitting.
    Create a 2D polynomial (z) using Polynomial2D and default coefficients.
    Fit z using an ICheb2D (Chebyshev2D) model.
    Evaluate the ICheb2D polynomial and compare with the initial z.
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
model = nlfitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting_with_weights(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
weights = np.ones_like(self.y)
model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
    Tests the joint fitting routine using two Gaussian models
"""
def setup_class(self):
"""
        Create two Gaussian models and some data with noise.
        Create a fitter for the two models, keeping the amplitude parameter
        common to both.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
        Tests the fitting routine against the equivalent procedure implemented
        directly with `scipy.optimize.leastsq` and compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
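        # Flat parameter vector for leastsq: the shared amplitude followed by
        # each model's (mean, stddev) parameters.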
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
def test_estimated_vs_analytic_deriv(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_estimated_vs_analytic_deriv_with_weights(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
        `Gaussian1D`, using weights.
"""
weights = 1.0 / (self.ydata / 10.)
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_with_optimize(self):
"""
Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
def test_with_weights(self):
"""
Tests results from `LevMarLSQFitter` with weights.
"""
# part 1: weights are equal to 1
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize('fitter_class', fitters)
def test_fitter_against_LevMar(self, fitter_class):
"""Tests results from non-linear fitters against `LevMarLSQFitter`."""
levmar = LevMarLSQFitter()
fitter = fitter_class()
with ignore_non_integer_warning():
new_model = fitter(self.gauss, self.xdata, self.ydata)
model = levmar(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
def test_LSQ_SLSQP_with_constraints(self):
"""
Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
constraints.
"""
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fitter = LevMarLSQFitter()
fslsqp = SLSQPLSQFitter()
with ignore_non_integer_warning():
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
def test_param_cov(self):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
X = np.matrix(np.vstack([x, np.ones(len(x))]).T)
beta = np.linalg.inv(X.T * X) * X.T * np.matrix(y).T
s2 = np.sum((y - (X * beta).A.ravel())**2) / (len(y) - len(beta))
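        # Scaling (X^T X)^-1 by the residual variance estimate gives the
        # exact OLS parameter covariance.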
olscov = np.linalg.inv(X.T * X) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
fitter = LevMarLSQFitter()
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.A.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def setup_class(self):
self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
        # This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
        # This should import but fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
populate_entry_points([mock_entry_importerror])
except AstropyUserWarning as w:
if "ImportError" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_func(self):
"""This returns a function which fails the type check"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
populate_entry_points([mock_entry_badfunc])
except AstropyUserWarning as w:
if "Class" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
populate_entry_points([mock_entry_badclass])
except AstropyUserWarning as w:
if 'modeling.Fitter' in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
self.y += (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
_, fitted_model = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
_, fitted_model = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
_, fitted_model = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Computes the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
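        # Map the centroid onto array indices (assuming a uniform grid
        # symmetric about zero) so the amplitude can be read from the data.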
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
self.z += (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
_, fitted_model = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
sigma=3.)
_, fitted_model = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.)
_, fitted_model = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1,5] = -1000 # outlier
filt_y, poly_set = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3,3:5,0] = 100. # outliers
filt_z, poly_set = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
        # values of x, y are not important as we fit a constant y(x, y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0,0] = 1000.0 # outlier
self.z[0,1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
filtered, fit = fitter(model, self.x1d, self.z1d)
assert(filtered.count() == self.z1d.size - 2)
assert(filtered.mask[0] and filtered.mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
filtered, fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
filtered, fit = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
filtered, fit = fitter(model, self.x, self.y, self.z)
assert(filtered.count() == self.z.size - 2)
assert(filtered.mask[0,0] and filtered.mask[0,1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
def test_2d_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LevMarLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
filtered, fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
"""Issue #5737 """
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LevMarLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_interface():
"""
Test that **kwargs work with all optimizers.
This is a basic smoke test.
"""
levmar = LevMarLSQFitter()
slsqp = SLSQPLSQFitter()
simplex = SimplexLSQFitter()
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
slsqp_model = slsqp(model, x, y, **kwargs)
simplex_model = simplex(model, x, y, **simplex_kwargs)
kwargs.pop('verblevel')
lm_model = levmar(model, x, y, **kwargs)
|
6e803a09c26ab5a5da800bb1391ae92c03561bde4485fee835e990c38ef5853c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from .example_models import models_1D, models_2D
from .. import fitting, models
from ..core import FittableModel
from ..polynomial import PolynomialBase
from ... import units as u
from ...utils import minversion
from ...tests.helper import assert_quantity_allclose
from ...utils import NumpyRNGContext
try:
import scipy
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
HAS_SCIPY_14 = HAS_SCIPY and minversion(scipy, "0.14")
@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
        Jacobian of the model function, i.e. the derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
y = sin_model.evaluate(x, 5., 2.)
y_prime = sin_model.fit_deriv(x, 5., 2.)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
fitter = fitting.LevMarLSQFitter()
model = fitter(sin_model, x, data)
    assert np.all(np.abs(np.array([model.amplitude.value, model.frequency.value]) -
                         np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2., frequency=0.5)
assert sin_model.amplitude == 2.
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox
dz, dy, dx = np.diff(bbox) / 2
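    # Half-widths of the bounding box, used to pad the comparison grid
    # beyond the box.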
z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2, z2)
sub_arr = model(x1, y1, z1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, and checks that it gives the correct values. It also tests
    whether the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
z = test_parameters['z_values']
assert np.all((np.abs(model(x, y) - z) < self.eval_error))
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
        # test the exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
ylim, xlim = bbox
dy, dx = np.diff(bbox)/2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter2D(self, model_class, test_parameters):
"""Test if the parametric model works with the fitter."""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected,
atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_2D(self, model_class, test_parameters):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
xv, yv = np.meshgrid(x, y)
try:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
except KeyError:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.RandomState(1234567890)
amplitude = test_parameters['parameters'][0]
n = 0.1 * amplitude * (rsn.rand(self.M, self.N) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters,
rtol=0.1)
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, and checks that it gives the correct values. It also tests
    whether the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = 5
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
if isinstance(model, models.Lorentz1D):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter1D(self, model_class, test_parameters):
"""
Test if the parametric model works with the fitter.
"""
x_lim = test_parameters['x_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
        # add 1% relative noise to the data
relative_noise_amplitude = 0.01
data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
model(x))
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_1D(self, model_class, test_parameters):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
x_lim = test_parameters['x_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters['parameters']
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.RandomState(1234567890)
n = 0.1 * parameters[0] * (rsn.rand(self.N) - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.15)
def create_model(model_class, test_parameters, use_constraints=True,
parameter_key='parameters'):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if 'constraints' in test_parameters:
constraints = test_parameters['constraints']
return model_class(*test_parameters[parameter_key], **constraints)
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False),
[[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False),
[[42, 84], [43, 86]])
def test_voigt_model():
"""
Currently just tests that the model peaks at its origin.
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0., .7, 1.4, 2.1, 3.9]
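    # Reference values from piecewise-linear interpolation of the table
    # (Tabular1D's default method).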
ans1 = [1., 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
with pytest.raises(ValueError):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False,
fill_value=None)
assert_allclose(model(xextrap),
[1., 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
assert_quantity_allclose(model(np.arange(5)),
[100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_interp_2d():
table = np.array([
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0., .7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array(
[-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
with pytest.raises(ValueError):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
with pytest.raises(ValueError):
model = LookupTable(lookup_table=[1, 2, 3])
with pytest.raises(NotImplementedError):
model = LookupTable(n_models=2)
with pytest.raises(ValueError):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
with pytest.raises(ValueError):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
with pytest.raises(ValueError):
model = LookupTable(points, table, bounds_error=False,
fill_value=1*u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
with pytest.raises(ValueError):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
    its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
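    # For a 2-input model the bounding box is ((y_low, y_high), (x_low, x_high)).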
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
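    # outside the bounding box the outputs are NaN by default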
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
    # test that the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert(p(1) == p(1, with_bounding_box=True))
|
c14a7626ce23106bcebc68fb0340c92170c68e7ff2d261d24de6c11f3dbc57cd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests model set evaluation for some common use cases.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ..models import Polynomial1D, Polynomial2D
from ..fitting import LinearLSQFitter
from ..core import Model
from ..parameters import Parameter
x = np.arange(4)
xx = np.array([x, x + 10])
xxx = np.arange(24).reshape((3, 4, 2))
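# Fixtures: x is a single input set, xx stacks two input sets, and xxx is a
# 3-D input used to exercise broadcasting.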
class TParModel(Model):
"""
    A toy model to test the parameters machinery
"""
    # standard_broadcasting = False
inputs = ('x',)
outputs = ('x',)
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(x, coeff, e):
return x*coeff + e
def test_model_axis_1():
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
model_axis = 1
n_models = 2
p1 = Polynomial1D(1, n_models=n_models, model_set_axis=model_axis)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = Polynomial1D(1, c0=2, c1=1)
t2 = Polynomial1D(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx))
def test_model_axis_2():
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
    p1 = Polynomial1D(1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]],
n_models=3, model_set_axis=2)
t1 = Polynomial1D(1, c0=1, c1=10)
t2 = Polynomial1D(1, c0=2, c1=20)
t3 = Polynomial1D(1, c0=3, c1=30)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x))
    p2 = Polynomial2D(1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]],
                      c1_0=[[[5, 6, 7]]], n_models=3, model_set_axis=2)
t1 = Polynomial2D(1, c0_0=0, c0_1=3, c1_0=5)
t2 = Polynomial2D(1, c0_0=1, c0_1=4, c1_0=6)
t3 = Polynomial2D(1, c0_0=2, c0_1=5, c1_0=7)
assert p2.c0_0.shape == ()
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x))
def test_axis_0():
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = Polynomial1D(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = Polynomial1D(1, c0=2, c1=1)
t2 = Polynomial1D(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2
def test_negative_axis():
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
    t1 = Polynomial1D(1, c0=1, c1=3)
    t2 = Polynomial1D(1, c0=2, c1=4)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
xxt = xx.T
y = p1(xxt)
    assert_allclose(y[:, 0], t1(xxt[:, 0]))
    assert_allclose(y[:, 1], t2(xxt[:, 1]))
def test_shapes():
p2 = Polynomial1D(1, n_models=3, model_set_axis=2)
assert p2.c0.shape == ()
assert p2.c1.shape == ()
p1 = Polynomial1D(1, n_models=2, model_set_axis=1)
assert p1.c0.shape == ()
assert p1.c1.shape == ()
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
assert p1.c0.shape == ()
assert p1.c1.shape == ()
e1 = [1, 2]
e2 = [3, 4]
a1 = np.array([[10, 20], [30, 40]])
a2 = np.array([[50, 60], [70, 80]])
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2)
assert t.e.shape == ()
def test_linearlsqfitter():
"""
Issue #7159
"""
p = Polynomial1D(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2*x+1, x+4])
y = np.rollaxis(y, 0, -1).T
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = Polynomial1D(1, c0=fit.c0[0][0], c1=fit.c1[0][0])
m2 = Polynomial1D(1, c0=fit.c0[0][1], c1=fit.c1[0][1])
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
def test_model_set_axis_outputs():
fitter = LinearLSQFitter()
model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
    y2, x2 = np.mgrid[:5, :5]
    # z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
model = fitter(model_set, x2, y2, z)
res = model(x2, y2, model_set_axis=False)
assert z.shape == res.shape
# Test initializing with integer model_set_axis
# and evaluating with a different model_set_axis
model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3],
n_models=2, model_set_axis=0)
y0 = model_set(xx)
y1 = model_set(xx.T, model_set_axis=1)
assert_allclose(y0[0], y1[:, 0])
assert_allclose(y0[1], y1[:, 1])
model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]],
n_models=2, model_set_axis=1)
y0 = model_set(xx.T)
y1 = model_set(xx, model_set_axis=0)
assert_allclose(y0[:, 0], y1[0])
assert_allclose(y0[:, 1], y1[1])
with pytest.raises(ValueError):
model_set(x)
|
77cc288efb4e4cf869a9067bfdc1f75a910cc7cc8c6c98be4740d07be70e6697 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ...wcs import wcs
from .. import models
from ... import units as u
from ...tests.helper import assert_quantity_allclose
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)
c2n = models.RotateCelestial2Native(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)
m = tan | n2c
minv = c2n | tan.inverse
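# The compositions above mirror wcslib: Pix2Sky_TAN deprojects
# intermediate pixel coordinates onto the native sphere and
# RotateNative2Celestial rotates them to celestial coordinates; minv
# chains the inverse stages in reverse order.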
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(40 * u.deg, -0.057 * u.rad), (21.5 * u.arcsec, 45.9 * u.deg)])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad)
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg)
assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg)
def test_Rotation2D():
model = models.Rotation2D(angle=90 * u.deg)
a, b = 1 * u.deg, 0 * u.deg
x, y = model(a, b)
assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg], atol=1e-10 * u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494 * u.deg)
x, y = model.inverse(*model(1 * u.deg, 0 * u.deg))
assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg], atol=1e-10 * u.deg)
def test_euler_angle_rotations():
ydeg = (90 * u.deg, 0 * u.deg)
y = (90, 0)
z = (0, 90)
# rotate y into minus z
model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad, 0 * u.rad, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg, 0 * u.deg, 'zxz')
assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg)
@pytest.mark.parametrize(('params'), [(60, 10, 25),
(60 * u.deg, 10 * u.deg, 25 * u.deg),
((60 * u.deg).to(u.rad),
(10 * u.deg).to(u.rad),
(25 * u.deg).to(u.rad))])
def test_euler_rotations_with_units(params):
x = 1 * u.deg
y = 1 * u.deg
phi, theta, psi = params
urot = models.EulerAngleRotation(phi, theta, psi, axes_order='xyz')
a, b = urot(x.value, y.value)
assert_allclose((a, b), (-23.614457631192547, 9.631254579686113))
a, b = urot(x, y)
assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))
a, b = urot(x.to(u.rad), y.to(u.rad))
assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))
def test_attributes():
n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg, np.pi * u.rad)
assert_allclose(n2c.lat.value, -72.3)
assert_allclose(n2c.lat._raw_value, -1.2618730491919001)
assert_allclose(n2c.lon.value, 20016)
assert_allclose(n2c.lon._raw_value, 0.09704030641088472)
assert_allclose(n2c.lon_pole.value, np.pi)
assert_allclose(n2c.lon_pole._raw_value, np.pi)
assert n2c.lon.unit is u.Unit("arcsec")
assert n2c._param_metrics['lon']['raw_unit'] is u.Unit("rad")
assert n2c.lat.unit is u.Unit("deg")
assert n2c._param_metrics['lat']['raw_unit'] is u.Unit("rad")
assert n2c.lon_pole.unit is u.Unit("rad")
assert n2c._param_metrics['lon_pole']['raw_unit'] is u.Unit("rad")
|
3e7e6ed9490ca8754e76d7fded5f5aa069cb392d13ee88d07474bf007e30ebe2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..core import Model
from ..models import Gaussian1D, Shift, Scale, Pix2Sky_TAN
from ... import units as u
from ...units import UnitsError
from ...tests.helper import assert_quantity_allclose
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
def test_evaluate_with_quantities():
"""
Test evaluation of a single model with Quantity parameters that do
not explicitly require units.
"""
# We create two models here - one with quantities, and one without. The one
# without is used to create the reference values for comparison.
g = Gaussian1D(1, 1, 0.1)
gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# We first check that calling the Gaussian with quantities returns the
# expected result
assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)
# Units have to be specified for the Gaussian with quantities - if not, an
# error is raised
with pytest.raises(UnitsError) as exc:
gq(1)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', (dimensionless), could not be "
"converted to required input units of m (length)")
# However, zero is a special case
assert_quantity_allclose(gq(0), g(0) * u.J)
# We can also evaluate models with equivalent units
assert_allclose(gq(0.0005 * u.km).value, g(0.5))
# But not with incompatible units
with pytest.raises(UnitsError) as exc:
gq(3 * u.s)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', s (time), could not be "
"converted to required input units of m (length)")
# We also can't evaluate the model without quantities with a quantity
with pytest.raises(UnitsError) as exc:
g(3 * u.m)
# TODO: determine what error message should be here
# assert exc.value.args[0] == ("Units of input 'x', m (length), could not be "
# "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
"""
We now make sure that equivalencies are correctly taken into account
"""
g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)
# We aren't setting the equivalencies, so this won't work
with pytest.raises(UnitsError) as exc:
g(30 * u.PHz)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"nm (length)")
# But it should now work if we pass equivalencies when evaluating
assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}),
g(9.993081933333332 * u.nm))
class MyTestModel(Model):
inputs = ('a', 'b')
outputs = ('f',)
def evaluate(self, a, b):
print('a', a)
print('b', b)
return a * b
class TestInputUnits():
def setup_method(self, method):
self.model = MyTestModel()
def test_evaluate(self):
# We should be able to evaluate with anything
assert_quantity_allclose(self.model(3, 5), 15)
assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)
def test_input_units(self):
self.model.input_units = {'a': u.deg}
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
"converted to required input units of deg (angle)")
with pytest.raises(UnitsError) as exc:
self.model(3, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', (dimensionless), could "
"not be converted to required input units of deg (angle)")
def test_input_units_allow_dimensionless(self):
self.model.input_units = {'a': u.deg}
self.model.input_units_allow_dimensionless = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
"converted to required input units of deg (angle)")
assert_quantity_allclose(self.model(3, 3), 9)
def test_input_units_strict(self):
self.model.input_units = {'a': u.deg}
self.model.input_units_strict = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
result = self.model(np.pi * u.rad, 2)
assert_quantity_allclose(result, 360 * u.deg)
assert result.unit is u.deg
def test_input_units_equivalencies(self):
self.model.input_units = {'a': u.micron}
with pytest.raises(UnitsError) as exc:
self.model(3 * u.PHz, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', PHz (frequency), could "
"not be converted to required input units of "
"micron (length)")
self.model.input_units_equivalencies = {'a': u.spectral()}
assert_quantity_allclose(self.model(3 * u.PHz, 3),
3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))
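# For reference, u.spectral() supplies the frequency <-> wavelength
# equivalence used above; (3 * u.PHz).to(u.micron,
# equivalencies=u.spectral()) is roughly 0.0999 micron.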
def test_return_units(self):
self.model.input_units = {'a': u.deg}
self.model.return_units = {'f': u.rad}
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_return_units_scalar(self):
# Check that return_units also works when giving a single unit since
# there is only one output, so it is unambiguous.
self.model.input_units = {'a': u.deg}
self.model.return_units = u.rad
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_and_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.arcsecond, 20 * u.arcsecond)
assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec)
assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec)
def test_plus_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 + s2
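# The '+' operator feeds the single input to both shifts and sums their
# outputs, hence 20 deg + 20 arcsec below.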
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec)
def test_compound_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec)
def test_compound_input_units_fail():
"""
Test incompatible units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_incompatible_units_fail():
"""
Test incompatible model units in chain.
"""
s1 = Shift(10 * u.pix)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_pipe_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a chained model
(which has one input).
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.pix, equivalencies={'x': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out, 25 * u.deg)
def test_compound_and_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a composite model
with two inputs.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.pix, 10 * u.pix, equivalencies={'x0': u.pixel_scale(0.5 * u.deg / u.pix),
'x1': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out[0], 15 * u.deg)
assert_quantity_allclose(out[1], 15 * u.deg)
def test_compound_input_units_equivalencies():
"""
Test setting input_units_equivalencies on one of the models.
"""
s1 = Shift(10 * u.deg)
s1.input_units_equivalencies = {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
s2 = Shift(10 * u.deg)
sp = Shift(10 * u.pix)
cs = s1 | s2
out = cs(10 * u.pix)
assert_quantity_allclose(out, 25 * u.deg)
cs = sp | s1
out = cs(10 * u.pix)
assert_quantity_allclose(out, 20 * u.deg)
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(20 * u.pix, 10 * u.deg)
assert_quantity_allclose(out, 20 * u.deg)
with pytest.raises(UnitsError) as exc:
out = cs(20 * u.pix, 10 * u.pix)
assert exc.value.args[0] == "TestModel: Units of input 'x1', pix (unknown), could not be converted to required input units of deg (angle)"
def test_compound_input_units_strict():
"""
Test setting input_units_strict on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s2 = Scale(2)
cs = s1 | s2
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s2 | s1
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s1 & s2
out = cs(10 * u.arcsec, 10 * u.arcsec)
assert_quantity_allclose(out, 20 * u.arcsec)
assert out[0].unit is u.deg
assert out[1].unit is u.arcsec
def test_compound_input_units_allow_dimensionless():
"""
Test setting input_units_allow_dimensionless on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s1.input_units_allow_dimensionless = True
s2 = Scale(2)
cs = s1 | s2
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == "TestModel: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"
s1.input_units_allow_dimensionless = False
cs = s1 | s2
cs = cs.rename('TestModel')
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == "TestModel: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"
s1.input_units_allow_dimensionless = True
cs = s2 | s1
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"
s1.input_units_allow_dimensionless = False
cs = s2 | s1
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"
s1.input_units_allow_dimensionless = True
s1 = ScaleDegrees(2)
s1.input_units_allow_dimensionless = True
s2 = ScaleDegrees(2)
s2.input_units_allow_dimensionless = False
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(10, 10 * u.arcsec)
assert_quantity_allclose(out[0], 20 * u.one)
assert_quantity_allclose(out[1], 20 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10, 10)
assert exc.value.args[0] == "TestModel: Units of input 'x1', (dimensionless), could not be converted to required input units of deg (angle)"
def test_compound_return_units():
"""
Test that return_units on the first model in the chain is respected for the
input to the second.
"""
class PassModel(Model):
inputs = ('x', 'y')
outputs = ('x', 'y')
@property
def input_units(self):
""" Input units. """
return {'x': u.deg, 'y': u.deg}
@property
def return_units(self):
""" Output units. """
return {'x': u.deg, 'y': u.deg}
def evaluate(self, x, y):
return x.value, y.value
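# evaluate deliberately returns bare values; the declared return_units
# reattach 'deg' to the outputs, which is the behaviour exercised below.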
cs = Pix2Sky_TAN() | PassModel()
assert_quantity_allclose(cs(0*u.deg, 0*u.deg), (0, 90)*u.deg)
|
c92db1093ac8421a9c92605ff918ef54720ce21b856884f103fe94734c8d6b4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests fitting and model evaluation with various inputs
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .. import models
from .. import fitting
from ..core import Model, FittableModel, Fittable1DModel
from ..parameters import Parameter
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
model1d_params = [
(models.Polynomial1D, [2]),
(models.Legendre1D, [2]),
(models.Chebyshev1D, [2]),
(models.Shift, [2]),
(models.Scale, [2])
]
model2d_params = [
(models.Polynomial2D, [2]),
(models.Legendre2D, [1, 2]),
(models.Chebyshev2D, [1, 2])
]
class TestInputType:
"""
This class tests that models accept numbers, lists and arrays.
Add new models to one of the lists above to test for this.
"""
def setup_class(self):
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.parametrize(('model', 'params'), model1d_params)
def test_input1D(self, model, params):
m = model(*params)
m(self.x)
m(self.x1)
m(self.x2)
@pytest.mark.parametrize(('model', 'params'), model2d_params)
def test_input2D(self, model, params):
m = model(*params)
m(self.x, self.y)
m(self.x1, self.y1)
m(self.x2, self.y2)
class TestFitting:
"""Test various input options to fitting routines."""
def setup_class(self):
self.x1 = np.arange(10)
self.y, self.x = np.mgrid[:10, :10]
def test_linear_fitter_1set(self):
"""1 set 1D x, 1pset"""
expected = np.array([0, 1, 1, 1])
p1 = models.Polynomial1D(3)
p1.parameters = [0, 1, 1, 1]
y1 = p1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.parameters, expected, atol=10 ** (-7))
def test_linear_fitter_Nset(self):
"""1 set 1D x, 2 sets 1D y, 2 param_sets"""
expected = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
p1 = models.Polynomial1D(3, n_models=2)
p1.parameters = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0]
params = {}
for i in range(4):
params[p1.param_names[i]] = [i, i]
p1 = models.Polynomial1D(3, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
def test_linear_fitter_1dcheb(self):
"""1 pset, 1 set 1D x, 1 set 1D y, Chebyshev 1D polynomial"""
expected = np.array(
[[2817.2499999999995,
4226.6249999999991,
1680.7500000000009,
273.37499999999926]]).T
ch1 = models.Chebyshev1D(3)
ch1.parameters = [0, 1, 2, 3]
y1 = ch1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(ch1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-2))
def test_linear_fitter_1dlegend(self):
"""
1 pset, 1 set 1D x, 1 set 1D y, Legendre 1D polynomial
"""
expected = np.array(
[[1925.5000000000011,
3444.7500000000005,
1883.2500000000014,
364.4999999999996]]).T
leg1 = models.Legendre1D(3)
leg1.parameters = [1, 2, 3, 4]
y1 = leg1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(leg1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-12))
def test_linear_fitter_1set2d(self):
p2 = models.Polynomial2D(2)
p2.parameters = [0, 1, 2, 3, 4, 5]
expected = [0, 1, 2, 3, 4, 5]
z = p2(self.x, self.y)
pfit = fitting.LinearLSQFitter()
model = pfit(p2, self.x, self.y, z)
assert_allclose(model.parameters, expected, atol=10 ** (-12))
assert_allclose(model(self.x, self.y), z, atol=10 ** (-12))
def test_wrong_numpset(self):
"""
A ValueError is raised if a single data set (1d x, 1d y) is fit
with a model that has multiple parameter sets.
"""
with pytest.raises(ValueError):
p1 = models.Polynomial1D(5)
y1 = p1(self.x1)
p1 = models.Polynomial1D(5, n_models=2)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
def test_wrong_pset(self):
"""A case of 1 set of x and multiple sets of y and parameters."""
expected = np.array([[1., 0],
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[1, 5]])
p1 = models.Polynomial1D(5, n_models=2)
params = {}
for i in range(6):
params[p1.param_names[i]] = [1, i]
p1 = models.Polynomial1D(5, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_1set_1d(self):
"""1 set 1D x, 1 set 1D y, 1 pset NonLinearFitter"""
g1 = models.Gaussian1D(10, mean=3, stddev=.2)
y1 = g1(self.x1)
gfit = fitting.LevMarLSQFitter()
model = gfit(g1, self.x1, y1)
assert_allclose(model.parameters, [10, 3, .2])
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_Nset_1d(self):
"""1 set 1D x, 1 set 1D y, 2 param_sets, NonLinearFitter"""
with pytest.raises(ValueError):
g1 = models.Gaussian1D([10.2, 10], mean=[3, 3.2], stddev=[.23, .2],
n_models=2)
y1 = g1(self.x1, model_set_axis=False)
gfit = fitting.LevMarLSQFitter()
model = gfit(g1, self.x1, y1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_1set_2d(self):
"""1 set 2d x, 1set 2D y, 1 pset, NonLinearFitter"""
g2 = models.Gaussian2D(10, x_mean=3, y_mean=4, x_stddev=.3,
y_stddev=.2, theta=0)
z = g2(self.x, self.y)
gfit = fitting.LevMarLSQFitter()
model = gfit(g2, self.x, self.y, z)
assert_allclose(model.parameters, [10, 3, 4, .3, .2, 0])
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_Nset_2d(self):
"""1 set 2d x, 1set 2D y, 2 param_sets, NonLinearFitter"""
with pytest.raises(ValueError):
g2 = models.Gaussian2D([10, 10], [3, 3], [4, 4], x_stddev=[.3, .3],
y_stddev=[.2, .2], theta=[0, 0], n_models=2)
z = g2(self.x.flatten(), self.y.flatten())
gfit = fitting.LevMarLSQFitter()
model = gfit(g2, self.x, self.y, z)
class TestEvaluation:
"""
Test various input options to model evaluation.
TestFitting already covers evaluation of polynomials.
"""
def setup_class(self):
self.x1 = np.arange(20)
self.y, self.x = np.mgrid[:10, :10]
def test_non_linear_NYset(self):
"""
This case covers:
N param sets, 1 set 1D x --> N 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
y1 = g1(self.x1, model_set_axis=False)
assert_allclose(y1[0, :], y1[1, :])
def test_non_linear_NXYset(self):
"""
This case covers: N param sets, N sets 1D x --> N sets 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
xx = np.array([self.x1, self.x1])
y1 = g1(xx)
assert_allclose(y1[:, 0], y1[:, 1], atol=10 ** (-12))
def test_p1_1set_1pset(self):
"""1 data set, 1 pset, Polynomial1D"""
p1 = models.Polynomial1D(4)
y1 = p1(self.x1)
assert y1.shape == (20,)
def test_p1_nset_npset(self):
"""N data sets, N param_sets, Polynomial1D"""
p1 = models.Polynomial1D(4, n_models=2)
y1 = p1(np.array([self.x1, self.x1]).T, model_set_axis=-1)
assert y1.shape == (20, 2)
assert_allclose(y1[0, :], y1[1, :], atol=10 ** (-12))
def test_p2_1set_1pset(self):
"""1 pset, 1 2D data set, Polynomial2D"""
p2 = models.Polynomial2D(5)
z = p2(self.x, self.y)
assert z.shape == (10, 10)
def test_p2_nset_npset(self):
"""N param_sets, N 2D data sets, Poly2d"""
p2 = models.Polynomial2D(5, n_models=2)
xx = np.array([self.x, self.x])
yy = np.array([self.y, self.y])
z = p2(xx, yy)
assert z.shape == (2, 10, 10)
def test_nset_domain(self):
"""
Test model set with negative model_set_axis.
In this case model_set_axis=-1 is identical to model_set_axis=1.
"""
xx = np.array([self.x1, self.x1]).T
xx[0, 0] = 100
xx[1, 0] = 100
xx[2, 0] = 99
p1 = models.Polynomial1D(5, c0=[1, 2], c1=[3, 4], n_models=2)
yy = p1(xx, model_set_axis=-1)
assert_allclose(xx.shape, yy.shape)
yy1 = p1(xx, model_set_axis=1)
assert_allclose(yy, yy1)
#x1 = xx[:, 0]
#x2 = xx[:, 1]
#p1 = models.Polynomial1D(5)
#assert_allclose(p1(x1), yy[0, :], atol=10 ** (-12))
#p1 = models.Polynomial1D(5)
#assert_allclose(p1(x2), yy[1, :], atol=10 ** (-12))
def test_evaluate_gauss2d(self):
cov = np.array([[1., 0.8], [0.8, 3]])
g = models.Gaussian2D(1., 5., 4., cov_matrix=cov)
y, x = np.mgrid[:10, :10]
g(x, y)
class TModel_1_1(Fittable1DModel):
p1 = Parameter()
p2 = Parameter()
@staticmethod
def evaluate(x, p1, p2):
return x + p1 + p2
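# TModel_1_1 evaluates to x + p1 + p2, so e.g. TModel_1_1(1, 10)(100) == 111;
# the digits of the expected values in the tests below make it easy to see
# which parameter elements were broadcast against which inputs.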
class TestSingleInputSingleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=1.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_1(1, 10)
y = t(100)
assert isinstance(y, float)
assert np.ndim(y) == 0
assert y == 111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y = t(100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([[100], [200]])
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]])
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3])
y1 = t([10, 20, 30])
assert np.shape(y1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
y2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
class TestSingleInputSingleOutputTwoModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=2.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
With n_models=2 all outputs should have a first dimension of size 2 (unless
defined with model_set_axis != 0).
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a 1-D array with
size equal to the number of models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y = t(100)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_scalar_parameters_1d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
with pytest.raises(ValueError):
y = t(np.arange(5) * 100)
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([100, 200], model_set_axis=False)
# In this case the input [100, 200] is evaluated by each model, rather
# than evaluating the first model with 100 and the second model
# with 200
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 211], [122, 222]])
y3 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y3) == (2, 3)
assert np.all(y3 == [[111, 211, 311], [122, 222, 322]])
def test_scalar_parameters_2d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y1 = t(np.arange(6).reshape(2, 3) * 100)
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[11, 111, 211],
[322, 422, 522]])
y2 = t(np.arange(6).reshape(2, 3) * 100, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3)
assert np.all(y2 == [[[11, 111, 211], [311, 411, 511]],
[[22, 122, 222], [322, 422, 522]]])
def test_scalar_parameters_3d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
data = np.arange(12).reshape(2, 3, 2) * 100
y1 = t(data)
assert np.shape(y1) == (2, 3, 2)
assert np.all(y1 == [[[11, 111], [211, 311], [411, 511]],
[[622, 722], [822, 922], [1022, 1122]]])
y2 = t(data, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3, 2)
assert np.all(y2 == np.array([data + 11, data + 22]))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
y = t(100)
assert np.shape(y) == (2, 3)
assert np.all(y == [[111, 122, 133], [144, 155, 166]])
def test_1d_array_parameters_1d_array_input(self):
"""
When the input is an array, if model_set_axis=False then it must
broadcast with the shapes of the parameters (excluding the
model_set_axis).
Otherwise all dimensions must be broadcastable.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
with pytest.raises(ValueError):
y1 = t([100, 200, 300])
y1 = t([100, 200])
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[111, 122, 133], [244, 255, 266]])
with pytest.raises(ValueError):
# Doesn't broadcast with the shape of the parameters, (3,)
y2 = t([100, 200], model_set_axis=False)
y2 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y2) == (2, 3)
assert np.all(y2 == [[111, 222, 333],
[144, 255, 366]])
def test_2d_array_parameters_2d_array_input(self):
t = TModel_1_1([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
n_models=2)
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2, 2)
assert np.all(y1 == [[[111, 222], [133, 244]],
[[355, 466], [377, 488]]])
with pytest.raises(ValueError):
y2 = t([[100, 200, 300], [400, 500, 600]])
y2 = t([[[100, 200], [300, 400]], [[500, 600], [700, 800]]])
assert np.shape(y2) == (2, 2, 2)
assert np.all(y2 == [[[111, 222], [333, 444]],
[[555, 666], [777, 888]]])
def test_mixed_array_parameters_1d_array_input(self):
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
with pytest.raises(ValueError):
y = t([10, 20, 30])
y = t([10, 20, 30], model_set_axis=False)
assert np.shape(y) == (2, 2, 3)
assert_allclose(y, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[14.07, 25.08, 36.09], [14.10, 25.11, 36.12]]])
class TModel_1_2(FittableModel):
inputs = ('x',)
outputs = ('y', 'z')
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
@staticmethod
def evaluate(x, p1, p2, p3):
return (x + p1 + p2, x + p1 + p2 + p3)
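# TModel_1_2 returns (x + p1 + p2, x + p1 + p2 + p3); the second output is
# always the first plus p3, which is why the tests below can assert
# z == y + 1000 whenever p3 is 1000.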
class TestSingleInputDoubleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = 1 but n_output = 2 on a toy model with n_models=1.
As of writing there are not enough controls to adjust how outputs from such
a model should be formatted (currently the shapes of outputs are assumed to
be directly associated with the shapes of corresponding inputs when
n_inputs == n_outputs). For now, the approach taken for cases like this is
to assume all outputs should have the same format.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(100)
assert isinstance(y, float)
assert isinstance(z, float)
assert np.ndim(y) == np.ndim(z) == 0
assert y == 111
assert z == 1111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
assert np.all(z == (y + 1000))
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
assert np.all(z == (y + 1000))
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
assert np.all(z == (y + 1000))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y, z = t(100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2,)
assert np.all(y == [111, 122])
assert np.all(z == [1111, 2122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y1, z1 = t([100, 200])
assert np.shape(y1) == np.shape(z1) == (2,)
assert np.all(y1 == [111, 222])
assert np.all(z1 == [1111, 2222])
y2, z2 = t([[100], [200]])
assert np.shape(y2) == np.shape(z2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
assert np.all(z2 == [[1111, 2122], [1211, 2222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[1, 2], [3, 4]], [[10, 20], [30, 40]],
[[1000, 2000], [3000, 4000]])
y1, z1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == np.shape(z1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
assert np.all(z1 == [[1111, 2222], [3333, 4444]])
y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
assert np.all(z2 == [[[[1111, 2122], [3133, 4144]],
[[1211, 2222], [3233, 4244]]],
[[[1311, 2322], [3333, 4344]],
[[1411, 2422], [3433, 4444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3], [100, 200, 300])
y1, z1 = t([10, 20, 30])
assert np.shape(y1) == np.shape(z1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
assert_allclose(z1, [[[111.01, 222.02, 333.03],
[111.04, 222.05, 333.06]],
[[111.07, 222.08, 333.09],
[111.10, 222.11, 333.12]]])
y2, z2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == np.shape(z2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
assert_allclose(z2, [[[[111.01, 212.02, 313.03],
[111.04, 212.05, 313.06]],
[[111.07, 212.08, 313.09],
[111.10, 212.11, 313.12]]],
[[[121.01, 222.02, 323.03],
[121.04, 222.05, 323.06]],
[[121.07, 222.08, 323.09],
[121.10, 222.11, 323.12]]],
[[[131.01, 232.02, 333.03],
[131.04, 232.05, 333.06]],
[[131.07, 232.08, 333.09],
[131.10, 232.11, 333.12]]]])
class TInputFormatter(Model):
"""
A toy model to test input/output formatting.
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
@staticmethod
def evaluate(x, y):
return x, y
def test_format_input_scalars():
model = TInputFormatter()
result = model(1, 2)
assert result == (1, 2)
def test_format_input_arrays():
model = TInputFormatter()
result = model([1, 1], [2, 2])
assert_allclose(result, (np.array([1, 1]), np.array([2, 2])))
def test_format_input_arrays_transposed():
model = TInputFormatter()
xy = np.array([[1, 1]]).T, np.array([[2, 2]]).T
result = model(*xy)
assert_allclose(result, xy)
|
f1b2e5889190fb3efec4701223df81c5fe269eef4d1bfc2719cfedab7426f7aa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for polynomial models."""
import os
from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .. import fitting
from ... import wcs
from ...io import fits
from ..polynomial import (Chebyshev1D, Hermite1D, Legendre1D, Polynomial1D,
Chebyshev2D, Hermite2D, Legendre2D, Polynomial2D, SIP,
PolynomialBase, OrthoPolynomialBase)
from ..functional_models import Linear1D
from ..mappings import Identity
from ...utils.data import get_pkg_data_filename
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
linear1d = {
Chebyshev1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Hermite1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Legendre1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Polynomial1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': 1.2}}
},
Linear1D: {
'args': (),
'kwargs': {},
'parameters': {'intercept': 1.2, 'slope': 23.1},
'constraints': {'fixed': {'intercept': 1.2}}
}
}
linear2d = {
Chebyshev2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': 1.2}}
},
Hermite2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': 1.2}}
},
Legendre2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': 1.2}}
},
Polynomial2D: {
'args': (1,),
'kwargs': {},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
'constraints': {'fixed': {'c0_0': 1.2}}
}
}
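# Each entry above maps a model class to the constructor args, domain
# kwargs, reference parameter values and optional 'fixed' constraints
# consumed by the parametrized fitting tests below.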
@pytest.mark.skipif('not HAS_SCIPY')
class TestFitting:
"""Test linear fitter with polynomial models."""
def setup_class(self):
self.N = 100
self.M = 100
self.x1 = np.linspace(1, 10, 100)
self.y2, self.x2 = np.mgrid[:100, :83]
rsn = np.random.RandomState(0)
self.n1 = rsn.randn(self.x1.size) * .1
self.n2 = rsn.randn(self.x2.size)
self.n2.shape = self.x2.shape
self.linear_fitter = fitting.LinearLSQFitter()
self.non_linear_fitter = fitting.LevMarLSQFitter()
# TODO: Most of these test cases have some pretty repetitive setup that we
# could probably factor out
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
# For the constraints tests we're not checking the overall fit,
# just that the constraint was maintained
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_non_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
model_nlin = self.non_linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_non_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
model_nlin = self.non_linear_fitter(model, self.x2, self.y2,
z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize('model_class',
[cls for cls in list(linear1d) + list(linear2d)
if issubclass(cls, PolynomialBase)])
def test_polynomial_init_with_constraints(model_class):
"""
Test that polynomial models can be instantiated with constraints, but no
parameters specified.
Regression test for https://github.com/astropy/astropy/issues/3606
"""
# Just determine which parameter to place a constraint on; it doesn't
# matter which parameter it is to exhibit the problem so long as it's a
# valid parameter for the model
if '1D' in model_class.__name__:
param = 'c0'
else:
param = 'c0_0'
if issubclass(model_class, OrthoPolynomialBase):
degree = (2, 2)
else:
degree = (2,)
m = model_class(*degree, fixed={param: True})
assert m.fixed[param] is True
assert getattr(m, param).fixed is True
def test_sip_hst():
"""Test SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'hst_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
assert_allclose(sip(1, 1), astwcs_result)
def test_sip_irac():
"""Test forward and inverse SIP againts astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'irac_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
ap_pars = dict(**hdr['AP_*'])
bp_pars = dict(**hdr['BP_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
ap_order = ap_pars.pop('AP_ORDER')
bp_order = bp_pars.pop('BP_ORDER')
del a_pars['A_DMAX']
del b_pars['B_DMAX']
pix = [200, 200]
rel_pix = [200 - crpix1, 200 - crpix2]
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars,
ap_order=ap_order, ap_coeff=ap_pars, bp_order=bp_order,
bp_coeff=bp_pars)
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
assert_allclose(sip(*pix), foc[0] - rel_pix)
assert_allclose(sip.inverse(*foc[0]) +
foc[0] - rel_pix, newpix - pix)
def test_sip_no_coeff():
sip = SIP([10, 12], 2, 2)
assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
with pytest.raises(NotImplementedError):
sip.inverse
@pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
Polynomial2D, Chebyshev2D, Legendre2D))
def test_zero_degree_polynomial(cls):
"""
A few tests that degree=0 polynomials are correctly evaluated and
fitted.
Regression test for https://github.com/astropy/astropy/pull/3589
"""
if cls.n_inputs == 1: # Test 1D polynomials
p1 = cls(degree=0, c0=1)
assert p1(0) == 1
assert np.all(p1(np.zeros(5)) == np.ones(5))
x = np.linspace(0, 1, 100)
# Add a little noise along a straight line
y = 1 + np.random.uniform(0, 0.1, len(x))
p1_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p1_fit = fitter(p1_init, x, y)
# The fit won't be exact of course, but it should get reasonably close
# (the noise is uniform on [0, 0.1], so allow atol=0.10)
assert_allclose(p1_fit.c0, 1, atol=0.10)
elif cls.n_inputs == 2: # Test 2D polynomials
if issubclass(cls, OrthoPolynomialBase):
p2 = cls(x_degree=0, y_degree=0, c0_0=1)
else:
p2 = cls(degree=0, c0_0=1)
assert p2(0, 0) == 1
assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
y, x = np.mgrid[0:1:100j, 0:1:100j]
z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
if issubclass(cls, OrthoPolynomialBase):
p2_init = cls(x_degree=0, y_degree=0)
else:
p2_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p2_fit = fitter(p2_init, x, y, z)
assert_allclose(p2_fit.c0_0, 1, atol=0.10)
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_orthopolynomial_in_compound_model():
"""
Ensure that OrthoPolynomialBase (ie. Chebyshev2D & Legendre2D) models get
evaluated & fitted correctly when part of a compound model.
Regression test for https://github.com/astropy/astropy/pull/6085.
"""
y, x = np.mgrid[0:5, 0:5]
z = x + y
fitter = fitting.LevMarLSQFitter()
simple_model = Chebyshev2D(2, 2)
simple_fit = fitter(simple_model, x, y, z)
fitter = fitting.LevMarLSQFitter() # re-init to compare like with like
compound_model = Identity(2) | Chebyshev2D(2, 2)
compound_fit = fitter(compound_model, x, y, z)
assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-15)
|
e5d30a1e6c48234254391a0cfb8b1c298f925dcd5bf5ea94eb9a249364845b6e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
from copy import deepcopy
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from ..core import Model, ModelDefinitionError
from ..parameters import Parameter
from ..models import (Const1D, Shift, Scale, Rotation2D, Gaussian1D,
Gaussian2D, Polynomial1D, Polynomial2D,
Chebyshev2D, Legendre2D, Chebyshev1D, Legendre1D,
AffineTransformation2D, Identity, Mapping)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_class_arithmetic_1d(expr, result):
# Const1D is perhaps the simplest model to test basic arithmetic with.
# TODO: Should define more tests later on for more complicated
# combinations of models
S = expr(Const1D, Const1D)
assert issubclass(S, Model)
assert S.n_inputs == 1
assert S.n_outputs == 1
# Initialize an instance of the model, providing values for the two
# "amplitude" parameters
s = S(2, 3)
# It shouldn't matter what input we evaluate on since this is a constant
# function
out = s(0)
assert out == result
assert isinstance(out, float)
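def test_class_vs_instance_arithmetic_equivalence():
    # Added illustration (not from the original suite): combining classes
    # and instantiating afterwards should agree with combining instances.
    s_class = (Const1D + Const1D)(2, 3)
    s_inst = Const1D(2) + Const1D(3)
    assert s_class(0) == s_inst(0) == 5.0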
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
with pytest.raises(ValueError):
s = expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, Model)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_mixed_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from an
expression of one model class with one model instance (and vice-versa).
"""
S1 = expr(Const1D, Const1D(3))
S2 = expr(Const1D(2), Const1D)
for cls in (S1, S2):
assert issubclass(cls, Model)
assert cls.n_inputs == 1
assert cls.n_outputs == 1
# Requires values for both amplitudes even though one of them has a
# default
# TODO: We may wish to fix that eventually, so that if a parameter has a
# default it doesn't *have* to be given in the init
s1 = S1(2, 3)
s2 = S2(2, 3)
for out in (s1(0), s2(0)):
assert out == result
assert isinstance(out, float)
def test_simple_two_model_class_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift | Scale # First shift then scale
assert issubclass(S1, Model)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
s1 = S1(2, 3) # Shift by 2 and scale by 3
assert s1(1) == 9.0
S2 = Scale | Shift # First scale then shift
assert issubclass(S2, Model)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
s2 = S2(2, 3) # Scale by 2 then shift by 3
assert s2(1) == 5.0
# Test with array inputs
assert_array_equal(s2([1, 2, 3]), [5.0, 7.0, 9.0])
def test_simple_two_model_class_compose_2d():
"""
A simple example consisting of two rotations.
"""
R = Rotation2D | Rotation2D
assert issubclass(R, Model)
assert R.n_inputs == 2
assert R.n_outputs == 2
r1 = R(45, 45) # Rotate twice by 45 degrees
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = R(90, 90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
R2 = R | R
r3 = R2(45, 45, 45, 45)
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels() == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels() == 3
g5 = g3 | g2
assert g5.n_submodels() == 5
g7 = g5 / g2
assert g7.n_submodels() == 7
# make sure it works as class method
p = Polynomial1D + Polynomial1D
assert p.n_submodels() == 2
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
# For the purposes of this test it doesn't matter a great deal what
# model(s) are used in the expression, I don't think
G = Gaussian1D
G2 = Gaussian2D
M = G + G
assert M._format_expression() == '[0] + [1]'
M = G + G + G
assert M._format_expression() == '[0] + [1] + [2]'
M = G + G * G
assert M._format_expression() == '[0] + [1] * [2]'
M = G * G + G
assert M._format_expression() == '[0] * [1] + [2]'
M = G + G * G + G
assert M._format_expression() == '[0] + [1] * [2] + [3]'
M = (G + G) * (G + G)
assert M._format_expression() == '([0] + [1]) * ([2] + [3])'
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == '[0] * [1] + [2] * [3]'
M = G ** G
assert M._format_expression() == '[0] ** [1]'
M = G + G ** G
assert M._format_expression() == '[0] + [1] ** [2]'
M = (G + G) ** G
assert M._format_expression() == '([0] + [1]) ** [2]'
M = G + G | G
assert M._format_expression() == '[0] + [1] | [2]'
M = G + (G | G)
assert M._format_expression() == '[0] + ([1] | [2])'
M = G & G | G2
assert M._format_expression() == '[0] & [1] | [2]'
M = G & (G | G)
assert M._format_expression() == '[0] & ([1] | [2])'
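# Note on the last four cases: Python gives '|' lower precedence than the
# arithmetic operators, so 'G + G | G' parses as '(G + G) | G' and formats
# without parentheses, while 'G + (G | G)' keeps its explicit ones.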
def test_indexing_on_class():
"""
Test indexing on compound model class objects, including cases where the
submodels are classes, as well as instances, or both.
"""
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
M = Gaussian1D + Const1D
assert M[0] is Gaussian1D
assert M[1] is Const1D
assert M['Gaussian1D'] is M[0]
assert M['Const1D'] is M[1]
M = Gaussian1D + p
assert M[0] is Gaussian1D
assert isinstance(M['p'], Polynomial1D)
m = g + p
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Polynomial1D)
assert isinstance(m['g'], Gaussian1D)
assert isinstance(m['p'], Polynomial1D)
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
# TODO: It would be good if there were an easier way to interrogate a compound
# model class for what expression it represents. Not sure what that would look
# like though.
def test_slicing_on_class():
"""
Test slicing a simple compound model class using integers and submodel names.
"""
A = Const1D.rename('A')
B = Const1D.rename('B')
C = Const1D.rename('C')
D = Const1D.rename('D')
E = Const1D.rename('E')
F = Const1D.rename('F')
M = A + B - C * D / E ** F
assert M[0:1] is A
# This test will also check that the correct parameter names are generated
# for each slice (fairly trivial in this case since all the submodels have
# the same parameter, but if any corner cases are found that aren't covered
# by this test we can do something different...)
assert M[0:1].param_names == ('amplitude',)
# This looks goofy but if you slice by name to the sub-model of the same
# name it should just return that model, logically.
assert M['A':'A'] is A
assert M['A':'A'].param_names == ('amplitude',)
assert M[5:6] is F
assert M[5:6].param_names == ('amplitude',)
assert M['F':'F'] is F
assert M['F':'F'].param_names == ('amplitude',)
# 1 + 2
assert M[:2](1, 2)(0) == 3
assert M[:2].param_names == ('amplitude_0', 'amplitude_1')
assert M[:'B'](1, 2)(0) == 3
assert M[:'B'].param_names == ('amplitude_0', 'amplitude_1')
# 2 - 3
assert M[1:3](2, 3)(0) == -1
assert M[1:3].param_names == ('amplitude_1', 'amplitude_2')
assert M['B':'C'](2, 3)(0) == -1
assert M['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
# 3 * 4
assert M[2:4](3, 4)(0) == 12
assert M[2:4].param_names == ('amplitude_2', 'amplitude_3')
assert M['C':'D'](3, 4)(0) == 12
assert M['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
# 4 / 5
assert M[3:5](4, 5)(0) == 0.8
assert M[3:5].param_names == ('amplitude_3', 'amplitude_4')
assert M['D':'E'](4, 5)(0) == 0.8
assert M['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
# 5 ** 6
assert M[4:6](5, 6)(0) == 15625
assert M[4:6].param_names == ('amplitude_4', 'amplitude_5')
assert M['E':'F'](5, 6)(0) == 15625
assert M['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
def test_slicing_on_instance():
"""
Test slicing a simple compound model instance using integers and names.
"""
A = Const1D.rename('A')
B = Const1D.rename('B')
C = Const1D.rename('C')
D = Const1D.rename('D')
E = Const1D.rename('E')
F = Const1D.rename('F')
M = A + B - C * D / E ** F
m = M(1, 2, 3, 4, 5, 6)
assert isinstance(m[0:1], A)
assert isinstance(m['A':'A'], A)
assert isinstance(m[5:6], F)
assert isinstance(m['F':'F'], F)
# 1 + 2
assert m[:'B'](0) == 3
assert m[:'B'].param_names == ('amplitude_0', 'amplitude_1')
assert np.all(m[:'B'].parameters == [1, 2])
# 2 - 3
assert m['B':'C'](0) == -1
assert m['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
assert np.all(m['B':'C'].parameters == [2, 3])
# 3 * 4
assert m['C':'D'](0) == 12
assert m['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
assert np.all(m['C':'D'].parameters == [3, 4])
# 4 / 5
assert m['D':'E'](0) == 0.8
assert m['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
assert np.all(m['D':'E'].parameters == [4, 5])
# 5 ** 6
assert m['E':'F'](0) == 15625
assert m['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
assert np.all(m['E':'F'].parameters == [5, 6])
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
M = Gaussian1D + Const1D
m = M(1, 0, 0.1, 2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert isinstance(m['Gaussian1D'], Gaussian1D)
assert isinstance(m['Const1D'], Const1D)
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
m = g + p
assert m[0].name == 'g'
assert m[1].name == 'p'
assert m['g'].name == 'g'
assert m['p'].name == 'p'
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Ensure this did *not* modify the original models we used as templates
assert p.c0 == 0
assert p.c1 == 0
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
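# Each stage is separately invertible: '&' joins invert component-wise and
# '|' chains invert in reverse order, so the compound inverse exists.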
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
@pytest.mark.parametrize('model', [
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
with pytest.raises(NotImplementedError):
model.inverse
def test_mapping_basic_permutations():
"""
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
RS = Rotation2D | Mapping((1, 0))
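# Mapping((1, 0)) swaps the two outputs, so RS returns the rotated
# coordinates in (y, x) order.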
x_prime, y_prime = RS(90)(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
M = Rotation2D & Scale
m = M(90, 2)
x, y, z = m(1, 2, 3)
MS = M | Mapping((2, 0, 1))
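# Mapping((2, 0, 1)) reorders the outputs (x, y, z) -> (z, x, y).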
ms = MS(90, 2)
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
RS = Rotation2D & Scale
# Rotates 2 of the coordinates and scales the third--then rotates on a
# different axis and scales on the axis of rotation. No physical meaning
# here, just a simple test
M = RS | Mapping([2, 0, 1]) | RS
m = M(12.1, 13.2, 14.3, 15.4)
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
# Same test case but using class composition
TestModel = ident1 & Shift | Rotation2D
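# In the class expression the parameter names carry the submodel index:
# Shift is submodel 1 (offset_1) and Rotation2D is submodel 2 (angle_2).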
model = TestModel(offset_1=1, angle_2=90)
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_slicing_on_instances_2():
"""
More slicing tests.
Regression test for https://github.com/embray/astropy/pull/10
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Rotation2D(3, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a & model_b) | model_c | (model_d & model_e)
with pytest.raises(ModelDefinitionError):
# The slice can't actually be taken since the resulting model cannot be
# evaluated
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
with pytest.raises(ModelDefinitionError):
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
with pytest.raises(ModelDefinitionError):
assert m[-4:4].submodel_names == ('b', 'c', 'd')
with pytest.raises(ModelDefinitionError):
assert m[-4:-2].submodel_names == ('b', 'c')
def test_slicing_on_instances_3():
"""
Like `test_slicing_on_instances_2` but uses a compound model that does not
have any invalid slices due to the resulting model being invalid
(originally test_slicing_on_instances_2 passed without any
ModelDefinitionErrors being raised, but that was before we prevented
invalid models from being created).
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Gaussian1D(3, 0, 0.1, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a + model_b) | model_c | (model_d + model_e)
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
assert m[-4:4].submodel_names == ('b', 'c', 'd')
assert m[-4:-2].submodel_names == ('b', 'c')
def test_slicing_on_instance_with_parameterless_model():
"""
Regression test to fix an issue where the indices attached to parameter
names on a compound model were not handled properly when one or more
submodels have no parameters. This was especially evident in slicing.
"""
p2 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
p1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
mapping = Mapping((0, 1, 0, 1))
offx = Shift(-2, name='x_translation')
offy = Shift(-1, name='y_translation')
aff = AffineTransformation2D(matrix=[[1, 2], [3, 4]], name='rotation')
model = mapping | (p1 & p2) | (offx & offy) | aff
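# The Mapping at index 0 contributes no parameters, so the parameter name
# suffixes start at _1 (for p1) rather than _0.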
assert model.param_names == ('c0_0_1', 'c1_0_1', 'c0_1_1',
'c0_0_2', 'c1_0_2', 'c0_1_2',
'offset_3', 'offset_4',
'matrix_5', 'translation_5')
assert model(1, 2) == (23.0, 53.0)
m = model[3:]
assert m.param_names == ('offset_3', 'offset_4', 'matrix_5',
'translation_5')
assert m(1, 2) == (1.0, 1.0)
def test_compound_model_with_nonstandard_broadcasting():
"""
Ensure that the ``standard_broadcasting`` flag is properly propagated when
creating compound models.
See the commit message for the commit in which this was added for more
details.
"""
offx = Shift(1)
offy = Shift(2)
rot = AffineTransformation2D([[0, -1], [1, 0]])
m = (offx & offy) | rot
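# The shifts take (0, 0) to (1, 2); the matrix maps (x, y) -> (-y, x),
# a 90-degree rotation, giving (-2, 1).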
x, y = m(0, 0)
assert x == -2
assert y == 1
# make sure conversion back to scalars is working properly
assert isinstance(x, float)
assert isinstance(y, float)
x, y = m([0, 1, 2], [0, 1, 2])
assert np.all(x == [-2, -3, -4])
assert np.all(y == [1, 2, 3])
def test_compound_model_classify_attributes():
"""
Regression test for an issue raised here:
https://github.com/astropy/astropy/pull/3231#discussion_r22221123
The issue is that part of the `help` implementation calls a utility
function called `inspect.classify_class_attrs`, which was leading to an
infinite recursion.
This is a useful test in its own right just in that it tests that compound
models can be introspected in some useful way without crashing--this works
as sort of a test of its somewhat complicated internal state management.
This test does not check any of the results of
`~inspect.classify_class_attrs`, though it might be useful to at some
point.
"""
inspect.classify_class_attrs(Gaussian1D + Gaussian1D)
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D | Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D + Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
@pytest.mark.parametrize('model',
[Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) +
Gaussian1D(fixed={'mean': True}),
(_ConstraintsTestA + _ConstraintsTestB)()])
def test_inherit_constraints(model):
"""
Various tests for copying of constraint values between compound models and
their members.
There are two versions of this test: One where a compound model is created
from two model instances, and another where a compound model is created
from two model classes that have default constraints set on some of their
parameters.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
# We have to copy the model before modifying it, otherwise the test fails
# if it is run twice in a row, because the state of the model instance
# would be preserved from one run to the next.
model = deepcopy(model)
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert 'stddev_0' in model.bounds
assert model.bounds['stddev_0'] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert 'mean_0' in model.fixed
assert model.fixed['mean_0'] is True
assert model.mean_0.fixed is True
assert 'mean_1' in model.fixed
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model[0].stddev.bounds = (0, 0.4)
assert model.bounds['stddev_0'] == (0, 0.4)
assert model.stddev_0.bounds == (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds['stddev'] == (0, 0.4)
model[0].bounds['stddev'] = (0.1, 0.5)
assert model.bounds['stddev_0'] == (0.1, 0.5)
assert model.stddev_0.bounds == (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds['stddev'] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.fixed['mean_1'] is False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
assert model[1].fixed['mean'] is False
model[1].fixed['mean'] = True
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
assert model[1].mean.fixed is True
assert model[1].fixed['mean'] is True
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
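# Inverting a composition reverses the order and inverts each member:
# (shift | model1).inverse == model1.inverse | shift.inverse, where
# model1.inverse is the custom poly set above.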
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
with pytest.raises(NotImplementedError):
(shift + model1).inverse
with pytest.raises(NotImplementedError):
(model1 & poly).inverse
@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2),
Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
# has to be defined at module level since pickling doesn't work right (in
# general) for classes defined in functions
class _TestPickleModel(Gaussian1D + Gaussian1D):
pass
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model class
GG = Gaussian1D + Gaussian1D
GG2 = pickle.loads(pickle.dumps(GG))
assert GG.param_names == GG2.param_names
assert GG.__name__ == GG2.__name__
# Test that it works, or at least evaluates successfully
assert GG()(0.12345) == GG2()(0.12345)
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
# Test pickling a concrete class
p = pickle.dumps(_TestPickleModel, protocol=0)
# Note: This is very dependent on the specific protocol, but the point of
# this test is that the "concrete" model is pickled in a very simple way
# that only specifies the module and class name, and is unpickled by
# re-importing the class from the module in which it was defined. This
# should still work for concrete subclasses of compound model classes that
# were dynamically generated through an expression
exp = b'castropy.modeling.tests.test_compound\n_TestPickleModel\np0\n.'
# When testing against the expected value we drop the memo length field
# at the end, which may differ between runs
assert p[:p.rfind(b'p')] == exp[:exp.rfind(b'p')]
assert pickle.loads(p) is _TestPickleModel
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
assert m(1) == 4
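# The compound instance holds its own copies of the submodel parameters,
# so mutating the original offx afterwards must not change m.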
offx.offset = 42
assert m(1) == 4
m.factor_1 = 100
assert m(1) == 200
m2 = m | offx
assert m2(1) == 242
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m._submodel_names == ('None_0', 'None_1')
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M"
assert m1.name == "M1"
|
cf2cea29a055d6bfb34bc4418debfe4f944a7610998b1161d226326ff0776112 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from inspect import signature
from numpy.testing import assert_allclose
from ..core import Model, custom_model
from ..parameters import Parameter
from .. import models
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (str(m) ==
"Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5")
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
inputs = ()
outputs = ('y',)
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
with pytest.raises(TypeError):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs']
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ('a', 'b')
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ('a', 'b')
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'y', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ('a',)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs']
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'x', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies']
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))
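# p(x) = 3*x - 2, so the exact inverse is x/3 + 2/3.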
with pytest.raises(NotImplementedError):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
inputs = ()
outputs = ('y',)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
# test an error is raised when the bounding box is larger than the input array
with pytest.raises(ValueError):
model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, .2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
if (im is None) and (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
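# rsq is the normalized squared radius; points with rsq < 1 lie inside
# the ellipsoid.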
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
if (im is None) and (c is None):
continue
actual = model.render(out=im, coords=c)
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D.n_submodels() == 1
assert models.Gaussian2D.n_submodels() == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._submodels) != id(new_model._submodels)
assert id(model._submodels[0]) != id(new_model._submodels[0])
assert id(model._submodels[1]) != id(new_model._submodels[1])
assert id(model._submodels[2]) != id(new_model._submodels[2])
|
0db0536a0344da3f02b723b42f13b3e34138145811b37a4343c696069cad5741 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
import itertools
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_equal, assert_array_equal,
assert_almost_equal)
from . import irafutil
from .. import models, fitting
from ..core import Model, FittableModel
from ..parameters import Parameter, InputParameterError
from ...utils.data import get_pkg_data_filename
def setter1(val):
return val
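# A parameter setter transforms the assigned value; the value used by
# evaluate() is whatever the setter returns. setter2 also receives the
# model instance and scales the value by model.p (see test_setter below).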
def setter2(val, model):
model.do_something(val)
return val * model.p
class SetterModel(FittableModel):
inputs = ('x', 'y')
outputs = ('z',)
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return ((x - xc)**2 + (y - yc)**2)
def do_something(self, v):
pass
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
m = MockModel()
p = m.alpha
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
# TODO: shouldn't setting a max < min give an error?
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
m = MockModel()
par = m.alpha
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
class TestParameters:
def setup_class(self):
"""
Unit tests for parameters.
Read an IRAF database file created by onedspec.identify. Use the
information to create a 1D Chebyshev model and perform the same fit.
Also create a Gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
with open(test_file) as f:
lines = f.read()
reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_slice(self):
"""
Tests updating the parameters attribute with a slice.
This is what fitters internally do.
"""
self.model.parameters[:] = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
self.model.parameters[0] = 10.
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test that the fitter modifies model.parameters.
Uses an IRAF example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
assert_allclose(new_model.parameters,
np.array(
[4826.1066602783685, 952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317]),
rtol=10 ** (-2))
def testPolynomial1D(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
assert_array_equal(sc1.factor.value, [3, 3])
def test_parameters_wrong_shape(self):
sh1 = models.Shift(2)
with pytest.raises(InputParameterError):
sh1.offset = [3, 3]
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
assert_almost_equal(
self.gmodel.param_sets,
np.array([[1., 10], [3.5, 5.2], [0.4, 0.7]]))
assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
assert_almost_equal(
self.gmodel.param_sets,
np.array([[11., 10], [3.5, 5.2], [0.4, 0.7]]))
assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization:
"""
This suite of tests checks most if not all cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
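# A scalar coeff and a length-2 e cannot be stacked into one rectangular
# array, so param_sets falls back to an object array of per-parameter arrays.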
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
t = TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == ()
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (3,)
assert t2.e.shape == (3,)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
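# coeff now has shape (2, 3, 2) and e has shape (3, 2); the two model sets
# lie along the last axis of each parameter array.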
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,
50, 90, 1, 3, 2, 4, 3, 5])
assert t.coeff.shape == (2, 3)
assert t.e.shape == (3,)
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
t = TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
m = TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
Tests that a model whose 3 parameters do not all mutually broadcast is
rejected with InputParameterError, regardless of the order in which the
parameters are passed.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(-1, 3, np.pi)
for x, y in pars:
model.x = x
model.y = y
assert_almost_equal(model(x, y), (x + 1)**2 + (y - np.pi * 3)**2)
|
83c6b00401566f6e7aeebf79bc96a220c6bebdc63a066dce2c5f82eedd954fcb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import cos, sin
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .. import models
from ...wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
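# m: pixel -> sky (TAN deprojection, then native-to-celestial rotation);
# minv composes the inverse steps in reverse order: sky -> native -> pixel.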
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(0, 0), (40, -20.56), (21.5, 45.9)])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
The rotation matrices definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)],
[(c3*s2), (c2), (s2*s3)],
[(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
model = models.EulerAngleRotation(23.4, 12.2, 34, axes_order)
mat = model._create_matrix(phi, theta, psi, axes_order)
assert_allclose(mat.T, matrices[axes_order])
|
674d2a7276b9ce7ee3f2f73a70635fb8292b8b907ba4df1d36bdb77f0ea340c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import types
import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.random import RandomState
from ..core import Fittable1DModel
from ..parameters import Parameter
from .. import models
from .. import fitting
from .utils import ignore_non_integer_warning
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, 13, stddev=.4)
self.x = np.arange(10, 20, .1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = RandomState(1234567890)
self.n = rsn.randn(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif('not HAS_SCIPY')
def test_fixed_par(self):
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3,
fixed={'amplitude': True})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif('not HAS_SCIPY')
def test_tied_par(self):
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev,
rtol=10 ** (-5))
@pytest.mark.skipif('not HAS_SCIPY')
def test_joint_fitter(self):
g1 = models.Gaussian1D(10, 14.9, stddev=.3)
g2 = models.Gaussian1D(10, 13, stddev=.4)
jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'],
g2: ['amplitude']}, [9.8])
x = np.arange(10, 20, .1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
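# Reproduce the joint fit with a direct scipy.optimize.leastsq call: one
# shared amplitude p[0] plus a (mean, stddev) pair for each Gaussian.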
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1,
compmodel(p[0], p[3:], x2) - y2])
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_no_constraints(self):
g1 = models.Gaussian1D(9.9, 14.5, stddev=.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif('not HAS_SCIPY')
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0, 443.0,
416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0, 428.0])
self.data = data.reshape(11, 11)
def test_bounds_lsq(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.LevMarLSQFitter()
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with ignore_non_integer_warning():
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_gauss2d_lsq(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.LevMarLSQFitter()
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.SLSQPLSQFitter()
with ignore_non_integer_warning():
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0., 9.]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = RandomState(1234567890)
self.n = rsn.randn(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied['amplitude'], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
gauss.mean.fixed = False
assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
gauss.amplitude.tied = False
assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False}
def test_set_bounds_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, None)})
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (None, None)}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {'a': (None, None), 'b': (0, None)}
assert m.fixed == {'a': False, 'b': True}
# Make a model instance that overrides the default constraints and values
m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)},
fixed={'a': True, 'b': False})
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {'a': (1, None), 'b': (2, None)}
assert m.fixed == {'a': True, 'b': False}
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_fixed_and_bound_constraints():
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
m = models.Gaussian1D(amplitude=3, mean=4, stddev=1,
bounds={'mean': (4, 5)},
fixed={'amplitude': True})
x = np.linspace(0, 10, 10)
y = np.exp(-x ** 2 / 2)
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
fitted_2 = f(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
    assert fitted_2.mean >= 4
    assert fitted_2.mean <= 5
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_bound_constraints_estimate_jacobian():
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
f2 = fitting.LevMarLSQFitter()
fitted_2 = f2(m2, x, y)
    assert np.allclose(fitted_2.a, 1.5)
    assert np.allclose(fitted_2.b, -3)
# Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are so long as they're not all zero).
assert np.any(f2.fit_info['fjac'] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian2d_positive_stddev():
    # This is a 2D Gaussian with noise to be fitted, as provided by @ysBach
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]]
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
fitter = fitting.LevMarLSQFitter()
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
# Issue #6403
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_model():
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
fitter = fitting.LevMarLSQFitter()
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
from ...utils import NumpyRNGContext
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
|
d895872524e76ef64758e915e3c28a834465b7b7b97ffb8df3adc80e041f1590 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from ..fitting import LevMarLSQFitter
from ..models import Shift, Rotation2D, Gaussian1D, Identity, Mapping
from ...utils import NumpyRNGContext
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_swap_axes():
x = np.zeros((2, 3))
y = np.ones((2, 3))
mapping = Mapping((1, 0))
assert(mapping(1, 2) == (2.0, 1.0))
assert(mapping.inverse(2, 1) == (1, 2))
assert_array_equal(mapping(x, y), (y, x))
assert_array_equal(mapping.inverse(y, x), (x, y))
def test_duplicate_axes():
mapping = Mapping((0, 1, 0, 1))
assert(mapping(1, 2) == (1.0, 2., 1., 2))
assert(mapping.inverse(1, 2, 1, 2) == (1, 2))
assert(mapping.inverse.n_inputs == 4)
assert(mapping.inverse.n_outputs == 2)
def test_drop_axes_1():
mapping = Mapping((0,), n_inputs=2)
assert(mapping(1, 2) == (1.))
def test_drop_axes_2():
mapping = Mapping((1, ))
assert(mapping(1, 2) == (2.))
with pytest.raises(NotImplementedError):
mapping.inverse
def test_drop_axes_3():
mapping = Mapping((1,), n_inputs=2)
assert(mapping.n_inputs == 2)
rotation = Rotation2D(60)
model = rotation | mapping
assert_allclose(model(1, 2), 1.86602540378)
def test_identity():
x = np.zeros((2, 3))
y = np.ones((2, 3))
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=60)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393))
res_x, res_y = model(x, y)
assert_allclose((res_x, res_y),
(np.array([[-1.73205081, -1.73205081, -1.73205081],
[-1.73205081, -1.73205081, -1.73205081]]),
np.array([[1., 1., 1.],
[1., 1., 1.]])))
assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.e-10)
# https://github.com/astropy/astropy/pull/6018
@pytest.mark.skipif('not HAS_SCIPY')
def test_fittable_compound():
m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4)
x = np.arange(10)
y_real = m(x)
dy = 0.005
with NumpyRNGContext(1234567):
n = np.random.normal(0., dy, x.shape)
y_noisy = y_real + n
pfit = LevMarLSQFitter()
new_model = pfit(m, x, y_noisy)
y_fit = new_model(x)
assert_allclose(y_fit, y_real, atol=dy)
|
f5aa2cd401bf6144319105518e2530b649ee5fbbb2a3d7e50bf15f70e35214e3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
from datetime import datetime
import locale
import pytest
import numpy as np
from .. import data, misc
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
# AttributeError (it used ".iteritems()" which was removed in Python3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
strurl = misc.find_api_page('astropy.utils.misc', 'dev', False, timeout=3)
objurl = misc.find_api_page(misc, 'dev', False, timeout=3)
assert strurl == objurl
assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc'
def test_skip_hidden():
path = data._find_pkg_data_path('data')
for root, dirs, files in os.walk(path):
assert '.hidden_file.txt' in files
assert 'local.dat' in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert '.hidden_file.txt' not in files
assert 'local.dat' in files
break
def test_JsonCustomEncoder():
from ... import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'
assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'
assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]'
assert json.dumps(b'hello world \xc3\x85',
cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"'
assert json.dumps({1: 2},
cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0., 1.]}}
assert newd == tmpd
tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}}
assert newd == tmpd
def test_inherit_docstrings():
class Base(metaclass=misc.InheritDocstrings):
def __call__(self, *args):
"FOO"
pass
@property
def bar(self):
"BAR"
pass
class Subclass(Base):
def __call__(self, *args):
pass
@property
def bar(self):
return 42
if Base.__call__.__doc__ is not None:
# TODO: Maybe if __doc__ is None this test should be skipped instead?
assert Subclass.__call__.__doc__ == "FOO"
if Base.bar.__doc__ is not None:
assert Subclass.bar.__doc__ == "BAR"
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, str('en_US'))
locale.setlocale(locale.LC_ALL, str('de_DE'))
except locale.Error as e:
pytest.skip('Locale error: {}'.format(e))
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime('%a, %b')
with misc.set_locale('en_US'):
assert date.strftime('%a, %b') == 'Sun, Oct'
with misc.set_locale('de_DE'):
assert date.strftime('%a, %b') == 'So, Okt'
# Back to original
assert date.strftime('%a, %b') == day_mon
with misc.set_locale(current):
assert date.strftime('%a, %b') == day_mon
def test_check_broadcast():
assert misc.check_broadcast((10, 1), (3,)) == (10, 3)
assert misc.check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)
with pytest.raises(ValueError):
misc.check_broadcast((10, 2), (3,))
with pytest.raises(ValueError):
misc.check_broadcast((10, 1), (3,), (4, 1, 2, 3))
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8
assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4
assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5
assert misc.dtype_bytes_or_chars(np.array(u'12345').dtype) == 5
|
26b81f456cf2f8ce600f0954b3c259b87dc4bd7ffbff598dff0a0f8de9346686 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion
__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_11_2',
'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14',
'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_11_2 = not minversion('numpy', '1.11.2')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
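# Illustrative usage (not part of the original module): downstream code is
# expected to branch on these flags rather than re-checking versions itself,
# e.g. (assuming this module lives at astropy.utils.compat.numpycompat):
#
#     from astropy.utils.compat.numpycompat import NUMPY_LT_1_14
#
#     if NUMPY_LT_1_14:
#         pass  # fall back to a pre-numpy-1.14 code path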
|
b2c89e920aaaec62f3089e091c58b9a4f8956930da99276448b6957897c95630 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.10'
__tabversion__ = '3.10'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
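# Illustrative sketch (not part of PLY itself): a typical driver loop over a
# built Lexer instance, using only the public API listed above. The `lexer`
# argument is assumed to come from the lex() builder defined later in this
# module.
def _example_token_loop(lexer, data):
    lexer.input(data)             # store a new input string
    toks = []
    while True:
        tok = lexer.token()       # next LexToken, or None at end of input
        if tok is None:
            break
        toks.append((tok.type, tok.value, tok.lineno, tok.lexpos))
    return toks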
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = False # Optimized mode
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
if isinstance(lextab, types.ModuleType):
raise IOError("Won't overwrite existing lextab module")
basetabmodule = lextab.split('.')[-1]
filename = os.path.join(outputdir, basetabmodule) + '.py'
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
titem.append((retext, _funcs_to_names(func, renames)))
tabre[statename] = titem
tf.write('_lexstatere = %s\n' % repr(tabre))
tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
taberr = {}
for statename, ef in self.lexstateerrorf.items():
taberr[statename] = ef.__name__ if ef else None
tf.write('_lexstateerrorf = %s\n' % repr(taberr))
tabeof = {}
for statename, ef in self.lexstateeoff.items():
tabeof[statename] = ef.__name__ if ef else None
tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
if isinstance(tabfile, types.ModuleType):
lextab = tabfile
else:
exec('import %s' % tabfile)
lextab = sys.modules[tabfile]
if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
raise ImportError('Inconsistent PLY version')
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lextokens_all = self.lextokens | set(self.lexliterals)
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for statename, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for pat, func_name in lre:
titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
self.lexstateerrorf = {}
for statename, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[statename] = fdict[ef]
self.lexstateeoff = {}
for statename, ef in lextab._lexstateeoff.items():
self.lexstateeoff[statename] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError('Undefined state')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing,
                # we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if newtok.type not in self.lextokens_all:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.__code__.co_filename, func.__code__.co_firstlineno,
func.__name__, newtok.type), lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer specification module. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
    # Get the rule definitions from the supplied module's dictionary
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
    # Determine if the module is part of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
    # Collect lexer information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
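# Illustrative sketch (not part of PLY itself): building and driving a tiny
# lexer with lex(). The token names and rules below are hypothetical; lex()
# picks them up from the calling function's namespace via the frame inspection
# in get_caller_module_dict().
def _example_build_lexer():
    tokens = ('NUMBER', 'PLUS')   # collected by lex() from this namespace
    t_PLUS = r'\+'
    t_ignore = ' \t'
    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t
    def t_error(t):
        t.lexer.skip(1)
    return lex()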
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
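# Illustrative sketch (not part of PLY itself): @TOKEN attaches a regex that
# is assembled from parts instead of being written in the docstring. The
# identifier pattern below is a hypothetical example.
def _example_token_decorator():
    digit = r'([0-9])'
    nondigit = r'([_A-Za-z])'
    identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
    @TOKEN(identifier)
    def t_ID(t):
        return t
    return t_ID.regex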
|
605c161d63c2500f7960252242d83ffee1e31e38145d24f00a399318b6efd13f | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = str
xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT1(t):
r'(/\*(.|\n)*?\*/)'
ncr = t.value.count("\n")
t.lexer.lineno += ncr
# replace with one space or a number of '\n'
t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
return t
# Line comment
def t_CPP_COMMENT2(t):
r'(//.*?(\n|$))'
    # replace with '\n'
t.type = 'CPP_WS'; t.value = '\n'
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
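# Example (illustrative):
#
#     trigraph('??=define ARR(i) a??(i??)')  ->  '#define ARR(i) a[i]'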
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
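# Example (illustrative): for the directive '#define SUM(a, b) a + b' the
# preprocessor builds Macro('SUM', <tokens for 'a + b'>, arglist=['a', 'b']),
# and macro_prescan() below then records where 'a' and 'b' get patched in.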
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer   # assumes a `lex` module is bound in this namespace; pass a lexer explicitly otherwise
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
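    # Example (illustrative): with the default token rules above,
    # Preprocessor(lexer).tokenize('a + 1') yields LexTokens of types
    # CPP_ID, CPP_WS, and CPP_INTEGER; literal characters such as '+'
    # come back with their own character as the token type.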
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
    # function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
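    # Example (illustrative): given the physical lines '#define X \' and '1',
    # the trailing backslash joins them, so group_lines() yields a single
    # token group for '#define X 1'.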
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
        return 0, [], []
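    # Illustrative sketch (not part of the original source; assumes a
    # Preprocessor instance p built on a suitable lexer):
    #
    #     toks = p.tokenize('(a, (b, c), d)')
    #     count, args, positions = p.collect_args(toks)
    #     # args -> token lists for 'a', '(b, c)' and 'd'; the comma inside
    #     # the nested parentheses does not start a new argument.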
# ----------------------------------------------------------------------
# macro_prescan()
#
    # Examine the macro value (token sequence) and identify patch points.
    # This is used to speed up macro expansion later on---we'll know
    # right away where to apply patches to the value to form the expansion.
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
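    # Illustrative example (not part of the original source): for
    #
    #     #define STR(x)   #x
    #     #define CAT(a,b) a##b
    #
    # prescanning STR records one str_patch entry for argument 0, while
    # prescanning CAT records two 'c' (concatenation) patches, so those
    # arguments are later pasted in without being macro-expanded first.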
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
    # representing the replacement macro tokens.
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
        # Make the variadic macro comma patch.  If the variadic macro argument
        # is empty, we get rid of the comma that precedes it in the replacement.
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
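    # Illustrative example (not part of the original source): for
    # '#define SQUARE(x) ((x)*(x))' the prescan records two 'e' patches, and
    # calling macro_expand_args() with the tokens for 'n+1' splices the
    # (already expanded) argument into both patch points, yielding the token
    # sequence for '((n+1)*(n+1))'.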
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
                        if j < len(tokens) and tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
                                i += len(rep)
                        else:
                            # The macro name is not followed by '(', so this
                            # is not a macro invocation; emit it unchanged and
                            # move on (avoids rescanning the same token forever).
                            i += 1
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
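    # Note (added commentary): the 'expanded' dictionary implements the usual
    # C rule that a macro is not re-expanded inside its own expansion; e.g.
    # with '#define FOO FOO+1', expanding 'FOO' produces the tokens for
    # 'FOO+1' instead of recursing forever.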
# ----------------------------------------------------------------------
# evalexpr()
#
    # Evaluate an expression token sequence as an integral constant
    # expression, as needed for #if and #elif processing.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
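    # Illustrative example (not part of the original source): for the
    # directive '#if defined(FOO) && BAR > 2', the defined(FOO) test is
    # folded to 1L or 0L above, BAR is macro-expanded (or becomes 0L if it
    # is not a macro), '&&' is rewritten to 'and', and the resulting Python
    # expression is handed to eval().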
# ----------------------------------------------------------------------
# parsegen()
#
    # Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
                # Preprocessor directive
                # Preserve the newline tokens from the directive line so
                # that output line numbering stays in sync.
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
                            if args[0].value not in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
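    # Note (added commentary): ifstack holds one (enable, iftrigger) pair per
    # nested conditional.  'enable' records whether tokens are currently being
    # emitted, and 'iftrigger' records whether any branch of the current #if
    # chain has already fired, which is what lets #elif/#else take over only
    # when no earlier branch was taken.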
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
                        # If the lexer returned the name and the trailing '...'
                        # as a single token, strip the '...' so the bare name
                        # is used for macro expansion
                        if a[0].value[-3:] == '...':
                            a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
    with open(sys.argv[1]) as f:
        input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
d8960d798b6b3f3d49ccb48b3b77781ac4bccc953c8d8fc8fc2475548f605ab0 | # ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed, for performance reasons.
#
# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
# for that method is then used to create the other methods. See the comments in
# yacc.py for further details.
import os.path
import shutil
def get_source_range(lines, tag):
srclines = enumerate(lines)
start_tag = '#--! %s-start' % tag
end_tag = '#--! %s-end' % tag
for start_index, line in srclines:
if line.strip().startswith(start_tag):
break
for end_index, line in srclines:
if line.strip().endswith(end_tag):
break
return (start_index + 1, end_index)
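# Illustrative sketch (not part of the original source): yacc.py brackets
# each parser variant with marker comments of the form
#
#     #--! parsedebug-start
#     ...method source...
#     #--! parsedebug-end
#
# and get_source_range(lines, 'parsedebug') returns the slice indices of the
# lines strictly between the two markers.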
def filter_section(lines, tag):
filtered_lines = []
include = True
tag_text = '#--! %s' % tag
for line in lines:
if line.strip().startswith(tag_text):
include = not include
elif include:
filtered_lines.append(line)
return filtered_lines
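# Illustrative sketch (not part of the original source): paired '#--! DEBUG'
# marker lines toggle inclusion, so filter_section(lines, 'DEBUG') drops the
# markers and every line between each pair while keeping everything else.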
def main():
dirname = os.path.dirname(__file__)
shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
lines = f.readlines()
parse_start, parse_end = get_source_range(lines, 'parsedebug')
parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
# Get the original source
orig_lines = lines[parse_start:parse_end]
# Filter the DEBUG sections out
parseopt_lines = filter_section(orig_lines, 'DEBUG')
# Filter the TRACKING sections out
parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
# Replace the parser source sections with updated versions
lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
lines[parseopt_start:parseopt_end] = parseopt_lines
lines = [line.rstrip()+'\n' for line in lines]
with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
f.writelines(lines)
print('Updated yacc.py')
if __name__ == '__main__':
main()