hash | content
---|---|
ed880b6c12358d97815ddc921ce5bdc9d43eb8aee307baab5ff6b4c72b6332c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from asdf import tagged
from asdf.tests.helpers import assert_tree_match
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from astropy.modeling.core import CompoundModel, Model
from astropy.modeling.models import Const1D, Identity, Mapping
__all__ = ["CompoundType", "RemapAxesType"]
_operator_to_tag_mapping = {
"+": "add",
"-": "subtract",
"*": "multiply",
"/": "divide",
"**": "power",
"|": "compose",
"&": "concatenate",
"fix_inputs": "fix_inputs",
}
_tag_to_method_mapping = {
"add": "__add__",
"subtract": "__sub__",
"multiply": "__mul__",
"divide": "__truediv__",
"power": "__pow__",
"compose": "__or__",
"concatenate": "__and__",
"fix_inputs": "fix_inputs",
}
class CompoundType(TransformType):
name = ["transform/" + x for x in _tag_to_method_mapping.keys()]
types = [CompoundModel]
version = "1.2.0"
handle_dynamic_subclasses = True
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
tag = node._tag[node._tag.rfind("/") + 1 :]
tag = tag[: tag.rfind("-")]
oper = _tag_to_method_mapping[tag]
left = node["forward"][0]
if not isinstance(left, Model):
raise TypeError(f"Unknown model type '{node['forward'][0]._tag}'")
right = node["forward"][1]
if not isinstance(right, Model) and not (
oper == "fix_inputs" and isinstance(right, dict)
):
raise TypeError(f"Unknown model type '{node['forward'][1]._tag}'")
if oper == "fix_inputs":
right = dict(zip(right["keys"], right["values"]))
model = CompoundModel("fix_inputs", left, right)
else:
model = getattr(left, oper)(right)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def to_tree_tagged(cls, model, ctx):
warnings.warn(create_asdf_deprecation_warning())
left = model.left
if isinstance(model.right, dict):
right = {
"keys": list(model.right.keys()),
"values": list(model.right.values()),
}
else:
right = model.right
node = {"forward": [left, right]}
try:
tag_name = "transform/" + _operator_to_tag_mapping[model.op]
except KeyError:
raise ValueError(f"Unknown operator '{model.op}'")
node = tagged.tag_object(cls.make_yaml_tag(tag_name), node, ctx=ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert_tree_match(a.left, b.left)
assert_tree_match(a.right, b.right)
class RemapAxesType(TransformType):
name = "transform/remap_axes"
types = [Mapping]
version = "1.3.0"
@classmethod
def from_tree_transform(cls, node, ctx):
mapping = node["mapping"]
n_inputs = node.get("n_inputs")
        if all(isinstance(x, int) for x in mapping):
return Mapping(tuple(mapping), n_inputs)
if n_inputs is None:
n_inputs = max(x for x in mapping if isinstance(x, int)) + 1
transform = Identity(n_inputs)
new_mapping = []
i = n_inputs
for entry in mapping:
if isinstance(entry, int):
new_mapping.append(entry)
else:
new_mapping.append(i)
transform = transform & Const1D(entry.value)
i += 1
return transform | Mapping(new_mapping)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {"mapping": list(model.mapping)}
if model.n_inputs > max(model.mapping) + 1:
node["n_inputs"] = model.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.mapping == b.mapping
assert a.n_inputs == b.n_inputs
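# Usage sketch (editor's addition): round-tripping a compound model exercises
# CompoundType, which maps each operator to a transform tag (e.g. "|" ->
# transform/compose) and rebuilds the model via the matching dunder method.
# Assumes asdf 2.x with astropy's entry points installed; the file name is
# illustrative.
def _example_compound_roundtrip():
    import asdf
    from astropy.modeling.models import Scale, Shift

    model = Shift(1.0) | Scale(2.0)  # serialized with the transform/compose tag
    asdf.AsdfFile({"model": model}).write_to("compound.asdf")
    with asdf.open("compound.asdf") as af:
        return af["model"]  # CompoundModel rebuilt as Shift.__or__(Scale)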
|
33ccbb3fc468d79fda0c199e4510831bd95340826eb21a8607561eba4e58ac1a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_array_equal
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from astropy.modeling import powerlaws
from . import _parameter_to_value
__all__ = [
"PowerLaw1DType",
"BrokenPowerLaw1DType",
"SmoothlyBrokenPowerLaw1DType",
"ExponentialCutoffPowerLaw1DType",
"LogParabola1DType",
]
class PowerLaw1DType(TransformType):
name = "transform/power_law1d"
version = "1.0.0"
types = ["astropy.modeling.powerlaws.PowerLaw1D"]
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.PowerLaw1D(
amplitude=node["amplitude"], x_0=node["x_0"], alpha=node["alpha"]
)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {
"amplitude": _parameter_to_value(model.amplitude),
"x_0": _parameter_to_value(model.x_0),
"alpha": _parameter_to_value(model.alpha),
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert isinstance(a, powerlaws.PowerLaw1D) and isinstance(
b, powerlaws.PowerLaw1D
)
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
class BrokenPowerLaw1DType(TransformType):
name = "transform/broken_power_law1d"
version = "1.0.0"
types = ["astropy.modeling.powerlaws.BrokenPowerLaw1D"]
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.BrokenPowerLaw1D(
amplitude=node["amplitude"],
x_break=node["x_break"],
alpha_1=node["alpha_1"],
alpha_2=node["alpha_2"],
)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {
"amplitude": _parameter_to_value(model.amplitude),
"x_break": _parameter_to_value(model.x_break),
"alpha_1": _parameter_to_value(model.alpha_1),
"alpha_2": _parameter_to_value(model.alpha_2),
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert isinstance(a, powerlaws.BrokenPowerLaw1D) and isinstance(
b, powerlaws.BrokenPowerLaw1D
)
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
class SmoothlyBrokenPowerLaw1DType(TransformType):
name = "transform/smoothly_broken_power_law1d"
version = "1.0.0"
types = ["astropy.modeling.powerlaws.SmoothlyBrokenPowerLaw1D"]
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.SmoothlyBrokenPowerLaw1D(
amplitude=node["amplitude"],
x_break=node["x_break"],
alpha_1=node["alpha_1"],
alpha_2=node["alpha_2"],
delta=node["delta"],
)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {
"amplitude": _parameter_to_value(model.amplitude),
"x_break": _parameter_to_value(model.x_break),
"alpha_1": _parameter_to_value(model.alpha_1),
"alpha_2": _parameter_to_value(model.alpha_2),
"delta": _parameter_to_value(model.delta),
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert isinstance(a, powerlaws.SmoothlyBrokenPowerLaw1D) and isinstance(
b, powerlaws.SmoothlyBrokenPowerLaw1D
)
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
assert_array_equal(a.delta, b.delta)
class ExponentialCutoffPowerLaw1DType(TransformType):
name = "transform/exponential_cutoff_power_law1d"
version = "1.0.0"
types = ["astropy.modeling.powerlaws.ExponentialCutoffPowerLaw1D"]
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.ExponentialCutoffPowerLaw1D(
amplitude=node["amplitude"],
x_0=node["x_0"],
alpha=node["alpha"],
x_cutoff=node["x_cutoff"],
)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {
"amplitude": _parameter_to_value(model.amplitude),
"x_0": _parameter_to_value(model.x_0),
"alpha": _parameter_to_value(model.alpha),
"x_cutoff": _parameter_to_value(model.x_cutoff),
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert isinstance(a, powerlaws.ExponentialCutoffPowerLaw1D) and isinstance(
b, powerlaws.ExponentialCutoffPowerLaw1D
)
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.x_cutoff, b.x_cutoff)
class LogParabola1DType(TransformType):
name = "transform/log_parabola1d"
version = "1.0.0"
types = ["astropy.modeling.powerlaws.LogParabola1D"]
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.LogParabola1D(
amplitude=node["amplitude"],
x_0=node["x_0"],
alpha=node["alpha"],
beta=node["beta"],
)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {
"amplitude": _parameter_to_value(model.amplitude),
"x_0": _parameter_to_value(model.x_0),
"alpha": _parameter_to_value(model.alpha),
"beta": _parameter_to_value(model.beta),
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert isinstance(a, powerlaws.LogParabola1D) and isinstance(
b, powerlaws.LogParabola1D
)
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.beta, b.beta)
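# Usage sketch (editor's addition): each power-law tag stores only the model
# parameters, so a round-trip preserves them exactly. Assumes asdf 2.x with
# astropy's entry points installed; the file name is illustrative.
def _example_powerlaw_roundtrip():
    import asdf

    model = powerlaws.PowerLaw1D(amplitude=10.0, x_0=0.5, alpha=2.0)
    asdf.AsdfFile({"model": model}).write_to("powerlaw.asdf")
    with asdf.open("powerlaw.asdf") as af:
        return af["model"].alpha.value  # 2.0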
|
4d4d3764354431f7d24c1b0a5de33fd853081193b0edd95b74f514648a4c903d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
from astropy.io.misc.asdf.types import AstropyType
from astropy.time import TimeDelta
__all__ = ["TimeDeltaType"]
allclose_jd = functools.partial(np.allclose, rtol=2.0**-52, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
) # 20 ps atol
class TimeDeltaType(AstropyType):
name = "time/timedelta"
types = [TimeDelta]
version = "1.0.0"
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return TimeDelta.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
assert allclose_jd(old.jd, new.jd)
assert allclose_jd2(old.jd2, new.jd2)
assert allclose_sec(old.sec, new.sec)
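# Usage sketch (editor's addition): TimeDeltaType delegates to TimeDelta's info
# machinery, so any format/scale representable by _represent_as_dict
# round-trips. Assumes asdf 2.x with astropy's entry points installed; the file
# name is illustrative.
def _example_timedelta_roundtrip():
    import asdf

    td = TimeDelta(0.125, format="jd")
    asdf.AsdfFile({"dt": td}).write_to("timedelta.asdf")
    with asdf.open("timedelta.asdf") as af:
        return allclose_jd(af["dt"].jd, td.jd)  # compare with module tolerance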
|
f97d67c9df228b2be77198934dfc8f0d6e4f90c54fc1893666493dc02689ca6c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from asdf.versioning import AsdfSpec
from numpy.testing import assert_array_equal
from astropy import time
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyAsdfType
from astropy.units import Quantity
__all__ = ["TimeType"]
_guessable_formats = {"iso", "byear", "jyear", "yday"}
_astropy_format_to_asdf_format = {
"isot": "iso",
"byear_str": "byear",
"jyear_str": "jyear",
}
def _assert_earthlocation_equal(a, b):
assert_array_equal(a.x, b.x)
assert_array_equal(a.y, b.y)
assert_array_equal(a.z, b.z)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon, b.lon)
class TimeType(AstropyAsdfType):
name = "time/time"
version = "1.1.0"
supported_versions = ["1.0.0", AsdfSpec(">=1.1.0")]
types = ["astropy.time.core.Time"]
requires = ["astropy"]
@classmethod
def to_tree(cls, node, ctx):
fmt = node.format
if fmt == "byear":
node = time.Time(node, format="byear_str")
elif fmt == "jyear":
node = time.Time(node, format="jyear_str")
elif fmt in ("fits", "datetime", "plot_date"):
node = time.Time(node, format="isot")
fmt = node.format
fmt = _astropy_format_to_asdf_format.get(fmt, fmt)
guessable_format = fmt in _guessable_formats
if node.scale == "utc" and guessable_format and node.isscalar:
return node.value
d = {"value": node.value}
if not guessable_format:
d["format"] = fmt
if node.scale != "utc":
d["scale"] = node.scale
if node.location is not None:
x, y, z = node.location.x, node.location.y, node.location.z
# Preserve backwards compatibility for writing the old schema
# This allows WCS to test backwards compatibility with old frames
# This code does get tested in CI, but we don't run a coverage test
if cls.version == "1.0.0": # pragma: no cover
unit = node.location.unit
d["location"] = {"x": x.value, "y": y.value, "z": z.value, "unit": unit}
else:
d["location"] = {
# It seems like EarthLocations can be represented either in
# terms of Cartesian coordinates or latitude and longitude, so
# we rather arbitrarily choose the former for our representation
"x": x,
"y": y,
"z": z,
}
return d
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, (str, list, np.ndarray)):
t = time.Time(node)
fmt = _astropy_format_to_asdf_format.get(t.format, t.format)
if fmt not in _guessable_formats:
raise ValueError(f"Invalid time '{node}'")
return t
value = node["value"]
fmt = node.get("format")
scale = node.get("scale")
location = node.get("location")
if location is not None:
unit = location.get("unit", u.m)
# This ensures that we can read the v.1.0.0 schema and convert it
# to the new EarthLocation object, which expects Quantity components
for comp in ["x", "y", "z"]:
if not isinstance(location[comp], Quantity):
location[comp] = Quantity(location[comp], unit=unit)
location = EarthLocation.from_geocentric(
location["x"], location["y"], location["z"]
)
return time.Time(value, format=fmt, scale=scale, location=location)
@classmethod
def assert_equal(cls, old, new):
assert old.format == new.format
assert old.scale == new.scale
if isinstance(old.location, EarthLocation):
assert isinstance(new.location, EarthLocation)
_assert_earthlocation_equal(old.location, new.location)
else:
assert old.location == new.location
assert_array_equal(old, new)
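# Behavior sketch (editor's addition): scalar UTC times in a guessable format
# serialize to a bare string, anything else to a mapping with explicit
# format/scale. ctx is unused by to_tree, so passing None suffices here.
def _example_time_to_tree():
    scalar = time.Time("2000-01-01T00:00:00.000")  # isot maps to guessable "iso"
    tai = time.Time(2451545.0, format="jd", scale="tai")
    return (
        TimeType.to_tree(scalar, None),  # "2000-01-01T00:00:00.000"
        TimeType.to_tree(tai, None),  # {"value": ..., "format": "jd", "scale": "tai"}
    )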
|
285574b30979203540c501784f9d983ec09ff98ebd5c78d0568c1b73b283a42f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from asdf.tags.core import NDArrayType
from astropy.coordinates.spectral_coordinate import SpectralCoord
from astropy.io.misc.asdf.tags.unit.unit import UnitType
from astropy.io.misc.asdf.types import AstropyType
__all__ = ["SpectralCoordType"]
class SpectralCoordType(AstropyType):
"""
    ASDF tag implementation used to serialize/deserialize SpectralCoord objects.
"""
name = "coordinates/spectralcoord"
types = [SpectralCoord]
version = "1.0.0"
@classmethod
def to_tree(cls, spec_coord, ctx):
node = {}
if isinstance(spec_coord, SpectralCoord):
node["value"] = spec_coord.value
node["unit"] = spec_coord.unit
if spec_coord.observer is not None:
node["observer"] = spec_coord.observer
if spec_coord.target is not None:
node["target"] = spec_coord.target
return node
raise TypeError(f"'{spec_coord}' is not a valid SpectralCoord")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, SpectralCoord):
return node
unit = UnitType.from_tree(node["unit"], ctx)
value = node["value"]
        observer = node.get("observer")
        target = node.get("target")
if isinstance(value, NDArrayType):
value = value._make_array()
return SpectralCoord(value, unit=unit, observer=observer, target=target)
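# Usage sketch (editor's addition): a SpectralCoord without observer/target
# serializes to just value + unit. Assumes asdf 2.x with astropy's entry points
# installed; the file name is illustrative.
def _example_spectralcoord_roundtrip():
    import asdf
    import astropy.units as u

    sc = SpectralCoord([100.0, 200.0], unit=u.GHz)
    asdf.AsdfFile({"sc": sc}).write_to("spectralcoord.asdf")
    with asdf.open("spectralcoord.asdf") as af:
        return af["sc"].unit  # GHz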
|
2ffba6e8c224d1464f6e1335e70752623aed16a6e9cefaf292495676476cbebf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import warnings
from asdf import tagged
import astropy.coordinates
import astropy.units as u
from astropy.coordinates import ICRS, Angle, Latitude, Longitude
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.io.misc.asdf.types import AstropyType
from astropy.units import Quantity
__all__ = ["CoordType"]
SCHEMA_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"data",
"schemas",
"astropy.org",
"astropy",
)
)
def _get_frames():
"""
By reading the schema files, get the list of all the frames we can
save/load.
"""
search = os.path.join(SCHEMA_PATH, "coordinates", "frames", "*.yaml")
files = glob.glob(search)
names = []
for fpath in files:
path, fname = os.path.split(fpath)
frame, _ = fname.split("-")
# Skip baseframe because we cannot directly save / load it.
# Skip icrs because we have an explicit tag for it because there are
# two versions.
if frame not in ["baseframe", "icrs"]:
names.append(frame)
return names
class BaseCoordType:
"""
This defines the base methods for coordinates, without defining anything
related to asdf types. This allows subclasses with different types and
schemas to use this without confusing the metaclass machinery.
"""
@staticmethod
def _tag_to_frame(tag):
"""
Extract the frame name from the tag.
"""
tag = tag[tag.rfind("/") + 1 :]
tag = tag[: tag.rfind("-")]
return frame_transform_graph.lookup_name(tag)
@classmethod
def _frame_name_to_tag(cls, frame_name):
return cls.make_yaml_tag(cls._tag_prefix + frame_name)
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
frame = cls._tag_to_frame(node._tag)
data = node.get("data", None)
if data is not None:
return frame(node["data"], **node["frame_attributes"])
return frame(**node["frame_attributes"])
@classmethod
def to_tree_tagged(cls, frame, ctx):
warnings.warn(create_asdf_deprecation_warning())
if type(frame) not in frame_transform_graph.frame_set:
raise ValueError(
"Can only save frames that are registered with the "
"transformation graph."
)
node = {}
if frame.has_data:
node["data"] = frame.data
frame_attributes = {}
for attr in frame.frame_attributes.keys():
value = getattr(frame, attr, None)
if value is not None:
frame_attributes[attr] = value
node["frame_attributes"] = frame_attributes
return tagged.tag_object(cls._frame_name_to_tag(frame.name), node, ctx=ctx)
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
if new.has_data:
assert u.allclose(new.data.lon, old.data.lon)
assert u.allclose(new.data.lat, old.data.lat)
class CoordType(BaseCoordType, AstropyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
types = [astropy.coordinates.BaseCoordinateFrame]
handle_dynamic_subclasses = True
requires = ["astropy"]
version = "1.0.0"
class ICRSType(CoordType):
"""
Define a special tag for ICRS so we can make it version 1.1.0.
"""
name = "coordinates/frames/icrs"
types = ["astropy.coordinates.ICRS"]
version = "1.1.0"
class ICRSType10(AstropyType):
name = "coordinates/frames/icrs"
types = [astropy.coordinates.ICRS]
requires = ["astropy"]
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = Angle(node["ra"]["wrap_angle"])
ra = Longitude(
node["ra"]["value"], unit=node["ra"]["unit"], wrap_angle=wrap_angle
)
dec = Latitude(node["dec"]["value"], unit=node["dec"]["unit"])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node["ra"] = {
"value": frame.ra.value,
"unit": frame.ra.unit.to_string(),
"wrap_angle": wrap_angle,
}
node["dec"] = {"value": frame.dec.value, "unit": frame.dec.unit.to_string()}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert u.allclose(new.ra, old.ra)
assert u.allclose(new.dec, old.dec)
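# Usage sketch (editor's addition): CoordType derives its tag list from the
# frame schema files on disk, so any registered frame with a schema (ICRS via
# its dedicated tag) round-trips. Assumes astropy's asdf entry points are
# installed; the file name is illustrative.
def _example_frame_roundtrip():
    import asdf

    frame = ICRS(ra=Longitude(25, u.deg), dec=Latitude(45, u.deg))
    asdf.AsdfFile({"frame": frame}).write_to("frame.asdf")
    with asdf.open("frame.asdf") as af:
        return af["frame"].ra.deg  # 25.0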
|
9f26ae34b8c9be4b2d074b94e182b378227165aed2571ecb7f500ba14f60ef58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.io.misc.asdf.types import AstropyType
class SkyCoordType(AstropyType):
name = "coordinates/skycoord"
types = [SkyCoord]
version = "1.0.0"
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, tree, ctx):
return SkyCoord.info._construct_from_dict(tree)
@classmethod
def assert_equal(cls, old, new):
assert skycoord_equal(old, new)
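# Usage sketch (editor's addition): SkyCoordType leans on
# SkyCoord.info._represent_as_dict, so frame, units, and extra attributes
# survive the round-trip. The file name is illustrative.
def _example_skycoord_roundtrip():
    import asdf

    coord = SkyCoord(10, 20, unit="deg")  # defaults to the ICRS frame
    asdf.AsdfFile({"coord": coord}).write_to("skycoord.asdf")
    with asdf.open("skycoord.asdf") as af:
        return skycoord_equal(af["coord"], coord)  # True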
|
46b8f3a1f6825c622f7690bfd23353e7ac397bde701201ba13caa66951de21cd | import astropy.coordinates.representation
import astropy.units as u
from astropy.coordinates.representation import BaseRepresentationOrDifferential
from astropy.io.misc.asdf.types import AstropyType
class RepresentationType(AstropyType):
name = "coordinates/representation"
types = [BaseRepresentationOrDifferential]
version = "1.0.0"
_representation_module = astropy.coordinates.representation
@classmethod
def to_tree(cls, representation, ctx):
comps = representation.components
components = {}
for c in comps:
value = getattr(representation, "_" + c, None)
if value is not None:
components[c] = value
t = type(representation)
node = {}
node["type"] = t.__name__
node["components"] = components
return node
@classmethod
def from_tree(cls, node, ctx):
rep_type = getattr(cls._representation_module, node["type"])
return rep_type(**node["components"])
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
assert new.components == old.components
for comp in new.components:
nc = getattr(new, comp)
oc = getattr(old, comp)
assert u.allclose(nc, oc)
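# Usage sketch (editor's addition): RepresentationType records the class name
# plus its components and looks the class back up in
# astropy.coordinates.representation on read. The file name is illustrative.
def _example_representation_roundtrip():
    import asdf
    from astropy.coordinates.representation import CartesianRepresentation

    rep = CartesianRepresentation(1 * u.m, 2 * u.m, 3 * u.m)
    asdf.AsdfFile({"rep": rep}).write_to("representation.asdf")
    with asdf.open("representation.asdf") as af:
        return af["rep"]  # CartesianRepresentation with the same components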
|
51344132014bdd8b644c9b05268a5210a033eca85ba1fb49dcd3a2ae79f29823 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.io.misc.asdf.tags.unit.quantity import QuantityType
__all__ = ["AngleType", "LatitudeType", "LongitudeType"]
class AngleType(QuantityType):
name = "coordinates/angle"
types = [Angle]
requires = ["astropy"]
version = "1.0.0"
organization = "astropy.org"
standard = "astropy"
@classmethod
def from_tree(cls, node, ctx):
return Angle(super().from_tree(node, ctx))
class LatitudeType(AngleType):
name = "coordinates/latitude"
types = [Latitude]
@classmethod
def from_tree(cls, node, ctx):
return Latitude(super().from_tree(node, ctx))
class LongitudeType(AngleType):
name = "coordinates/longitude"
types = [Longitude]
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = node["wrap_angle"]
return Longitude(super().from_tree(node, ctx), wrap_angle=wrap_angle)
@classmethod
def to_tree(cls, longitude, ctx):
tree = super().to_tree(longitude, ctx)
tree["wrap_angle"] = longitude.wrap_angle
return tree
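# Usage sketch (editor's addition): LongitudeType extends the quantity tree
# with wrap_angle so a Longitude survives intact. The file name is illustrative.
def _example_longitude_roundtrip():
    import asdf
    import astropy.units as u

    lon = Longitude(-100, u.deg, wrap_angle=180 * u.deg)
    asdf.AsdfFile({"lon": lon}).write_to("longitude.asdf")
    with asdf.open("longitude.asdf") as af:
        return af["lon"].wrap_angle  # 180 deg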
|
7bb57eff27df98c4369c93896952f55383796b38e94b0cf135d04270ffb5c119 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyType
class EarthLocationType(AstropyType):
name = "coordinates/earthlocation"
types = [EarthLocation]
version = "1.0.0"
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return EarthLocation.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
return (old == new).all()
|
47d9112ccaae947c535c802c523a61decfc0bac5cc784caa460e7cc240ed5ad8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def run_schema_example_test(organization, standard, name, version, check_func=None):
import asdf
from asdf.schema import load_schema
from asdf.tests import helpers
from asdf.types import format_tag
tag = format_tag(organization, standard, version, name)
uri = asdf.extension.default_extensions.extension_list.tag_mapping(tag)
r = asdf.extension.get_default_resolver()
examples = []
schema = load_schema(uri, resolver=r)
for node in asdf.treeutil.iter_tree(schema):
if (
isinstance(node, dict)
and "examples" in node
and isinstance(node["examples"], list)
):
for _, example in node["examples"]:
examples.append(example)
for example in examples:
buff = helpers.yaml_to_asdf("example: " + example.strip())
ff = asdf.AsdfFile(uri=uri)
# Add some dummy blocks so that the ndarray examples work
for i in range(3):
b = asdf.block.Block(np.zeros((1024 * 1024 * 8), dtype=np.uint8))
b._used = True
ff.blocks.add(b)
ff._open_impl(ff, buff, mode="r")
if check_func:
check_func(ff)
|
2bd7b59c38ce1ff3ab4efaab2f4b9847fa3a8f6c717c8f174700ba2719eac6bc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io.misc.asdf.types import AstropyType
from astropy.units import equivalencies
from astropy.units.equivalencies import Equivalency
class EquivalencyType(AstropyType):
name = "units/equivalency"
types = [Equivalency]
version = "1.0.0"
@classmethod
def to_tree(cls, equiv, ctx):
if not isinstance(equiv, Equivalency):
raise TypeError(f"'{equiv}' is not a valid Equivalency")
eqs = []
for e, kwargs in zip(equiv.name, equiv.kwargs):
kwarg_names = list(kwargs.keys())
kwarg_values = list(kwargs.values())
eq = {"name": e, "kwargs_names": kwarg_names, "kwargs_values": kwarg_values}
eqs.append(eq)
return eqs
@classmethod
def from_tree(cls, node, ctx):
eqs = []
for eq in node:
equiv = getattr(equivalencies, eq["name"])
kwargs = dict(zip(eq["kwargs_names"], eq["kwargs_values"]))
eqs.append(equiv(**kwargs))
return sum(eqs[1:], eqs[0])
@classmethod
def assert_equal(cls, a, b):
assert a == b
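# Usage sketch (editor's addition): from_tree rebuilds each named equivalency
# from astropy.units.equivalencies and sums them, mirroring Equivalency's "+"
# composition. The file name is illustrative.
def _example_equivalency_roundtrip():
    import asdf
    import astropy.units as u

    equiv = u.spectral() + u.parallax()
    asdf.AsdfFile({"equiv": equiv}).write_to("equivalency.asdf")
    with asdf.open("equivalency.asdf") as af:
        return af["equiv"] == equiv  # True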
|
928cf10fc31fc9bf1bac0642da10b7679b93c2704fd0eef5f03f7aaede3825c1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io.misc.asdf.types import AstropyAsdfType
from astropy.units import Unit, UnitBase
class UnitType(AstropyAsdfType):
name = "unit/unit"
types = ["astropy.units.UnitBase"]
requires = ["astropy"]
@classmethod
def to_tree(cls, node, ctx):
if isinstance(node, str):
node = Unit(node, format="vounit", parse_strict="warn")
if isinstance(node, UnitBase):
return node.to_string(format="vounit")
raise TypeError(f"'{node}' is not a valid unit")
@classmethod
def from_tree(cls, node, ctx):
return Unit(node, format="vounit", parse_strict="silent")
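# Behavior sketch (editor's addition): units are stored as VOUnit strings, and
# parse_strict="silent" on read keeps unrecognized units from raising. ctx is
# unused by to_tree/from_tree, so passing None suffices here.
def _example_unit_tree():
    vounit = UnitType.to_tree(Unit("km/s"), None)  # e.g. "km.s**-1"
    return UnitType.from_tree(vounit, None)  # back to a UnitBase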
|
e71ae19de058c132d6b9fbbead09c9d42588cb6f6d26ac0a561051f33e54e613 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from asdf.tags.core import NDArrayType
from astropy.io.misc.asdf.types import AstropyAsdfType
from astropy.units import Quantity
class QuantityType(AstropyAsdfType):
name = "unit/quantity"
types = ["astropy.units.Quantity"]
requires = ["astropy"]
version = "1.1.0"
@classmethod
def to_tree(cls, quantity, ctx):
node = {}
if isinstance(quantity, Quantity):
node["value"] = quantity.value
node["unit"] = quantity.unit
return node
raise TypeError(f"'{quantity}' is not a valid Quantity")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, Quantity):
return node
unit = node["unit"]
value = node["value"]
if isinstance(value, NDArrayType):
value = value._make_array()
return Quantity(value, unit=unit)
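# Usage sketch (editor's addition): QuantityType splits a Quantity into value
# and unit subtrees; NDArrayType values are materialized back into arrays on
# read. The file name is illustrative.
def _example_quantity_roundtrip():
    import asdf
    import astropy.units as u

    q = [1.0, 2.0, 3.0] * u.km
    asdf.AsdfFile({"q": q}).write_to("quantity.asdf")
    with asdf.open("quantity.asdf") as af:
        return af["q"].unit  # km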
|
90f3f6bf1047be8d4cfa1fc9b03408f18ce8a035f8c389c05c8ea1107c15d3e3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import numpy as np
from asdf.tags.core.ndarray import NDArrayType
from asdf.tests import helpers
from packaging.version import Version
import astropy.units as u
from astropy import table
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
from astropy.time import Time, TimeDelta
def test_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array(
[
([[1, 2], [3, 4]], 2.0, "x"),
([[5, 6], [7, 8]], 5.0, "y"),
([[9, 10], [11, 12]], 8.2, "z"),
],
dtype=[("a", "<i4", (2, 2)), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
assert t.columns["a"].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array(
[((1, "a"), 2.0, "x"), ((4, "b"), 5.0, "y"), ((5, "c"), 8.2, "z")],
dtype=[("a", [("a0", "<i4"), ("a1", "|S1")]), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array(
[(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")],
dtype=[("a", "<i4"), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version("2.8.0"):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
else:
helpers.assert_roundtrip_tree(
{"table": t},
tmpdir,
asdf_check_func=check,
write_options={"auto_inline": 64},
)
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff):
pass
assert "Inconsistent data column lengths" in str(err.value)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(
rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"), masked=True
)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["a"].mask = [True, False, True]
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t["a"] = [1, 2, 3]
t["b"] = ["x", "y", "z"]
t["c"] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff["table"]["c"], u.Quantity)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
def check(ff):
assert isinstance(ff["table"]["c"], Time)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff["table"]["c"], TimeDelta)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5")
def check(ff):
assert isinstance(ff["table"]["c"], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new["a"], old["a"])
NDArrayType.assert_equal(new["b"], old["b"])
assert skycoord_equal(new["c"], old["c"])
helpers.assert_roundtrip_tree(
{"table": t}, tmpdir, asdf_check_func=check, tree_match_func=tree_match
)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff["table"]["c"], EarthLocation)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = table.NdarrayMixin([5, 6])
helpers.assert_roundtrip_tree({"table": t}, tmpdir)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], table.Table)
run_schema_example_test("stsci.edu", "asdf", "core/table", "1.0.0", check)
|
2d5aa455df1d26f3febbb84f95449201363b0901022aa823ddb1c3d0578b7d3d | import pytest
from astropy.io.misc.asdf.tests import ASDF_ENTRY_INSTALLED
if not ASDF_ENTRY_INSTALLED:
pytest.skip(
"The astropy asdf entry points are not installed", allow_module_level=True
)
|
71405244703c9a767d87cc1b3c1a1ea5b4956d3cf295770774c94dd7dcc19c83 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import os
import numpy as np
from asdf.tests import helpers
from astropy.io import fits
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
def test_complex_structure(tmpdir):
with fits.open(
os.path.join(os.path.dirname(__file__), "data", "complex.fits"), memmap=False
) as hdulist:
tree = {"fits": hdulist}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array([(0, 1), (2, 3)], dtype=[("A", int), ("B", int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {"fits": h}
def check_yaml(content):
assert b"!<tag:astropy.org:astropy/table/table-1.0.0>" in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
def test_backwards_compat():
"""
Make sure that we can continue to read FITS HDUs that use the schema from
the ASDF Standard.
This test uses the examples in the fits schema from the ASDF Standard,
since these make no reference to Astropy's own fits definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], fits.HDUList)
run_schema_example_test("stsci.edu", "asdf", "fits/fits", "1.0.0", check)
|
6902bfecedb1a0097910cf3394c4d54ed98bbd7814bec75a248fc2d84867be16 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.modeling.models import UnitsMapping
def assert_model_roundtrip(model, tmpdir):
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir, tree_match_func=assert_models_equal)
def assert_models_equal(a, b):
assert a.name == b.name
assert a.inputs == b.inputs
assert a.input_units == b.input_units
assert a.outputs == b.outputs
assert a.mapping == b.mapping
assert a.input_units_allow_dimensionless == b.input_units_allow_dimensionless
for i in a.inputs:
if a.input_units_equivalencies is None:
a_equiv = None
else:
a_equiv = a.input_units_equivalencies.get(i)
        if b.input_units_equivalencies is None:
            b_equiv = None
        else:
            b_equiv = b.input_units_equivalencies.get(i)
assert a_equiv == b_equiv
def test_basic(tmpdir):
m = UnitsMapping(((u.m, u.dimensionless_unscaled),))
assert_model_roundtrip(m, tmpdir)
def test_remove_units(tmpdir):
m = UnitsMapping(((u.m, None),))
assert_model_roundtrip(m, tmpdir)
def test_accept_any_units(tmpdir):
m = UnitsMapping(((None, u.m),))
assert_model_roundtrip(m, tmpdir)
def test_with_equivalencies(tmpdir):
m = UnitsMapping(
((u.m, u.dimensionless_unscaled),),
input_units_equivalencies={"x": u.equivalencies.spectral()},
)
assert_model_roundtrip(m, tmpdir)
def test_with_allow_dimensionless(tmpdir):
m = UnitsMapping(
((u.m, u.dimensionless_unscaled), (u.s, u.Hz)),
input_units_allow_dimensionless=True,
)
assert_model_roundtrip(m, tmpdir)
m = UnitsMapping(
((u.m, u.dimensionless_unscaled), (u.s, u.Hz)),
input_units_allow_dimensionless={"x0": True, "x1": False},
)
assert_model_roundtrip(m, tmpdir)
|
49a13af678d6cf49ce6776f45573c1dc9e50f3ebec7228ca9f7ead5311dc7658 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import warnings
import asdf
import numpy as np
from asdf import AsdfFile, util
from asdf.tests import helpers
from packaging.version import Version
import astropy.units as u
from astropy.modeling import models as astmodels
from astropy.modeling.core import fix_inputs
from astropy.utils.compat.optional_deps import HAS_SCIPY
def custom_and_analytical_inverse():
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ("a", "b")
m.outputs = ("c",)
return m
test_models = [
astmodels.Identity(2),
astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.0),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4),
astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3),
astmodels.Multiply(10 * u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order="xzx"),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.0 * u.deg),
astmodels.Scale(3.4 * u.deg),
astmodels.RotateNative2Celestial(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotateCelestial2Native(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, 0.3], "xyzx"),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, 0.3], "xyzy"),
astmodels.AiryDisk2D(amplitude=10.0, x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10.0, x_0=0.5, width=5.0),
astmodels.Box2D(amplitude=10.0, x_0=0.5, x_width=5.0, y_0=1.5, y_width=7.0),
astmodels.Const1D(amplitude=5.0),
astmodels.Const2D(amplitude=5.0),
astmodels.Disk2D(amplitude=10.0, x_0=0.5, y_0=1.5, R_0=5.0),
astmodels.Ellipse2D(amplitude=10.0, x_0=0.5, y_0=1.5, a=2.0, b=4.0, theta=0.1),
astmodels.Exponential1D(amplitude=10.0, tau=3.5),
astmodels.Gaussian1D(amplitude=10.0, mean=5.0, stddev=3.0),
astmodels.Gaussian2D(
amplitude=10.0, x_mean=5.0, y_mean=5.0, x_stddev=3.0, y_stddev=3.0
),
astmodels.KingProjectedAnalytic1D(amplitude=10.0, r_core=5.0, r_tide=2.0),
astmodels.Logarithmic1D(amplitude=10.0, tau=3.5),
astmodels.Lorentz1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10.0, x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10.0, x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10.0, x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10.0, x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10.0, x_0=0.5, y_0=1.5, r_in=5.0, width=10.0),
astmodels.Sersic1D(amplitude=10.0, r_eff=1.0, n=4.0),
astmodels.Sersic2D(
amplitude=10.0, r_eff=1.0, n=4.0, x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0
),
astmodels.Sine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Cosine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Tangent1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcSine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcCosine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcTangent1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Trapezoid1D(amplitude=10.0, x_0=0.5, width=5.0, slope=1.0),
astmodels.TrapezoidDisk2D(amplitude=10.0, x_0=0.5, y_0=1.5, R_0=5.0, slope=1.0),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10.0, fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.0 * u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.0),
astmodels.LogParabola1D(
amplitude=10,
x_0=0.5,
alpha=2.0,
beta=3.0,
),
astmodels.PowerLaw1D(amplitude=10.0, x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(
amplitude=10.0, x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5
),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
if HAS_SCIPY:
test_models.append(
astmodels.Spline1D(
np.array([-3.0, -3.0, -3.0, -3.0, -1.0, 0.0, 1.0, 3.0, 3.0, 3.0, 3.0]),
np.array(
[
0.10412331,
0.07013616,
-0.18799552,
1.35953147,
-0.15282581,
0.03923,
-0.04297299,
0.0,
0.0,
0.0,
0.0,
]
),
3,
)
)
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
test_models_with_constraints = [
astmodels.Legendre2D(
x_degree=1,
y_degree=1,
c0_0=1,
c0_1=2,
c1_0=3,
fixed={"c1_0": True, "c0_1": True},
bounds={"c0_0": (-10, 10)},
)
]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
"compound": astmodels.Shift(1) & astmodels.Shift(2)
| astmodels.Sky2Pix_TAN()
| astmodels.Rotation2D()
| astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32])
+ astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {"rotation": rotation, "real_rotation": real_rotation}
def check(ff):
assert ff.tree["rotation"].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize("model", test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version("2.6.0"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
tree = {"single_model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree["rot"].name == "foo"
tree = {"rot": astmodels.Rotation2D(23, name="foo")}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {"azp": astmodels.Sky2Pix_AZP(0.5, 0.3)}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree["model"].name == "compound_model"
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename("compound_model")
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
@pytest.mark.slow
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
"forward": util.resolve_name(
f"astropy.modeling.projections.Sky2Pix_{name}"
)(),
"backward": util.resolve_name(
f"astropy.modeling.projections.Pix2Sky_{name}"
)(),
}
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version("2.5.1"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(
points,
lookup_table=table,
bounds_error=False,
fill_value=None,
method="nearest",
)
tree = {"model": model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
helpers.assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.0)},
tmpdir,
init_options={"version": standard_version},
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize(
"model",
[
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2,
c0_0=3,
c1_0=5,
c0_1=7,
x_domain=[-2, 2],
y_domain=[-4, 4],
x_window=[-6, 6],
y_window=[-8, 8],
),
],
)
def test_polynomial(tmpdir, standard_version, model):
helpers.assert_roundtrip_tree(
{"model": model}, tmpdir, init_options={"version": standard_version}
)
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(
1, 1, c0_0=1, c0_1=2, c1_0=3, x_domain=[-2, 2], y_domain=[-2, 2]
)
fa = AsdfFile()
fa.tree["model1d"] = model1d
fa.tree["model2d"] = model2d
file_path = str(tmpdir.join("orthopoly_domain.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model1d"](1.8) == model1d(1.8)
assert f.tree["model2d"](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(
2, c0=2, c1=3, c2=0.5, domain=[-2, 2], window=[-0.5, 0.5]
)
model2d = astmodels.Chebyshev2D(
1,
1,
c0_0=1,
c0_1=2,
c1_0=3,
x_domain=[-2, 2],
y_domain=[-2, 2],
x_window=[-0.5, 0.5],
y_window=[-0.1, 0.5],
)
fa = AsdfFile()
fa.tree["model1d"] = model1d
fa.tree["model2d"] = model2d
file_path = str(tmpdir.join("orthopoly_window.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model1d"](1.8) == model1d(1.8)
assert f.tree["model2d"](1.8, -1.5) == model2d(1.8, -1.5)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1 * u.nm, 1 * (u.nm / u.pixel))
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1.0, 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {"model": model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(
points,
lookup_table=table,
bounds_error=False,
fill_value=None,
method="nearest",
)
tree = {"model": model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version("2.5.1"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {
"x": u.dimensionless_angles(),
"y": u.dimensionless_angles(),
}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
"compound": fix_inputs(model, {"x": 45}),
"compound1": fix_inputs(model, {0: 45}),
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type(tmpdir):
with pytest.raises(TypeError):
tree = {"compound": fix_inputs(3, {"x": 45})}
helpers.assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {"compound": astmodels.Pix2Sky_TAN() & {"x": 45}}
helpers.assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(
"model",
[
astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1),
],
)
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree["model"] = model
file_path = str(tmpdir.join("custom_and_analytical_inverse.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model"].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
# test some models and compound models with some input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1 * u.kg)
m1.input_units_equivalencies = {"x": u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10 * u.Hz)
m2.input_units_equivalencies = {
"x": u.dimensionless_angles(),
"y": u.dimensionless_angles(),
}
# 2D model with only one input equivalencies
m3 = astmodels.Const2D(10 * u.Hz)
m3.input_units_equivalencies = {"x": u.dimensionless_angles()}
# model using equivalency that has args using units
m4 = astmodels.PowerLaw1D(amplitude=1 * u.m, x_0=10 * u.pix, alpha=7)
m4.input_units_equivalencies = {
"x": u.equivalencies.pixel_scale(0.5 * u.arcsec / u.pix)
}
return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10 * u.K, 11 * u.arcsec, 12 * u.arcsec)
m1.input_units_equivalencies = {"x": u.parallax()}
m2 = astmodels.Gaussian1D(5 * u.s, 2 * u.K, 3 * u.K)
m2.input_units_equivalencies = {"x": u.temperature()}
return [m1 | m2, m1 & m2, m1 + m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
|
94d0eea2ce55eddf1f7a72413afce94fea1e6ec4d204cf9f9cea3c7e41422e9e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.time import Time, TimeDelta
@pytest.mark.parametrize("fmt", TimeDelta.FORMATS.keys())
def test_timedelta(fmt, tmpdir):
t1 = Time(Time.now())
t2 = Time(Time.now())
td = TimeDelta(t2 - t1, format=fmt)
tree = dict(timedelta=td)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("scale", list(TimeDelta.SCALES) + [None])
def test_timedelta_scales(scale, tmpdir):
tree = dict(timedelta=TimeDelta(0.125, scale=scale, format="jd"))
assert_roundtrip_tree(tree, tmpdir)
def test_timedelta_vector(tmpdir):
tree = dict(timedelta=TimeDelta([1, 2] * u.day))
assert_roundtrip_tree(tree, tmpdir)
|
2ef97d26cbad7e8db5e3dfa65aa78d6a21e387683f6557137fa4f0f9512fc1c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import datetime
import asdf.schema as asdf_schema
import numpy as np
from asdf import AsdfFile, tagged, yamlutil
from asdf.tests import helpers
from astropy import time
def _flatten_combiners(schema):
newschema = dict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault("items", [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == "items":
cursor = cursor.setdefault("items", dict())
else:
cursor = cursor.setdefault("properties", dict())
if i < len(path) - 1 and isinstance(path[i + 1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, dict())
cursor.update(schema)
def test_time(tmpdir):
time_array = time.Time(np.arange(100), format="unix")
tree = {"large_time_array": time_array}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = time.Time([1, 2], location=location, format="cxcsec")
tree = {"time": t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location_1_0_0(tmpdir):
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=6378100 * u.m, y=0 * u.m, z=0 * u.m)
t = time.Time("J2000.000", location=location, format="jyear_str")
tree = {"time": t}
# The version refers to ASDF Standard 1.0.0, which includes time-1.0.0
helpers.assert_roundtrip_tree(tree, tmpdir, init_options={"version": "1.0.0"})
def test_isot(tmpdir):
isot = time.Time("2000-01-01T00:00:00.000")
tree = {"time": isot}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
if isinstance(tree["time"], str):
assert str(tree["time"]) == isot.value
elif isinstance(tree["time"], dict):
assert str(tree["time"]["value"]) == isot.value
assert str(tree["time"]["base_format"]) == "isot"
else:
assert False
def test_isot_array(tmpdir):
tree = {"time": time.Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_tag():
schema = asdf_schema.load_schema(
"http://stsci.edu/schemas/asdf/time/time-1.1.0", resolve_references=True
)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {"date": date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree["date"], asdf)
asdf_schema.validate(instance, schema=schema)
tag = "tag:stsci.edu:asdf/time/time-1.1.0"
date = tagged.tag_object(tag, date)
tree = {"date": date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree["date"], asdf)
asdf_schema.validate(instance, schema=schema)
|
60cab1942f61d4204142b1f3ff11b826fbf46fdb0ae51763b2c15bcad8cf9f31 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import FK5, ICRS, Angle, Latitude, Longitude
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {"coord": ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {"coord": ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_nodata(tmpdir):
tree = {"coord": ICRS()}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2] * units.deg, dec=[3, 4, 5] * units.deg)
tree = {"coord": icrs}
assert_roundtrip_tree(tree, tmpdir)
def test_fk5_time(tmpdir):
tree = {"coord": FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir)
|
4150e3f21e25320e28971c273bb569ab8c7fc4ed8a08d674a371bf571f50a827 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.coordinates.angles import Latitude, Longitude
from astropy.coordinates.earth import ELLIPSOIDS, EarthLocation
@pytest.fixture
def position():
lon = Longitude(
[0.0, 45.0, 90.0, 135.0, 180.0, -180, -90, -45], u.deg, wrap_angle=180 * u.deg
)
lat = Latitude([+0.0, 30.0, 60.0, +90.0, -90.0, -60.0, -30.0, 0.0], u.deg)
h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11.0, -0.1], u.m)
return lon, lat, h
def test_earthlocation_quantity(tmpdir):
location = EarthLocation(
lat=34.4900 * u.deg, lon=-104.221800 * u.deg, height=40 * u.km
)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation(position, tmpdir):
x, y, z = EarthLocation.from_geodetic(*position).to_geocentric()
geocentric = EarthLocation(x, y, z)
tree = dict(location=geocentric)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("ellipsoid", ELLIPSOIDS)
def test_earthlocation_geodetic(position, ellipsoid, tmpdir):
location = EarthLocation.from_geodetic(*position, ellipsoid=ellipsoid)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation_site(tmpdir):
orig_sites = getattr(EarthLocation, "_site_registry", None)
try:
EarthLocation._get_site_registry(force_builtin=True)
rog = EarthLocation.of_site("greenwich")
tree = dict(location=rog)
assert_roundtrip_tree(tree, tmpdir)
finally:
EarthLocation._site_registry = orig_sites
|
83499191bc02598a88e35882038fd00482b5a0e959a7e8cb186b9b17f8490109 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import FK4, ICRS, Galactic, Longitude, SkyCoord
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
# These tests are cribbed directly from the Examples section of
# https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_vector_skycoord(tmpdir):
c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_fk4(tmpdir):
coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize(
"coord",
[
SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic), # Units from string
SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s"),
],
)
def test_skycoord_galactic(coord, tmpdir):
tree = dict(coord=coord)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_ra_dec(tmpdir):
ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
c = SkyCoord(ra, dec, frame="icrs")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime="2001-01-02T12:34:56")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_override_defaults(tmpdir):
c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
c = SkyCoord(c, obstime="J2010.11", equinox="B1965") # Override defaults
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_cartesian(tmpdir):
c = SkyCoord(
w=0, u=1, v=2, unit="kpc", frame="galactic", representation_type="cartesian"
)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_vector_frames(tmpdir):
c = SkyCoord([ICRS(ra=1 * u.deg, dec=2 * u.deg), ICRS(ra=3 * u.deg, dec=4 * u.deg)])
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason="Velocities are not properly serialized yet")
def test_skycoord_radial_velocity(tmpdir):
c = SkyCoord(ra=1 * u.deg, dec=2 * u.deg, radial_velocity=10 * u.km / u.s)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason="Velocities are not properly serialized yet")
def test_skycoord_proper_motion(tmpdir):
c = SkyCoord(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=2 * u.mas / u.yr,
pm_dec=1 * u.mas / u.yr,
)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skip(reason="Apparent loss of precision during serialization")
def test_skycoord_extra_attribute(tmpdir):
sc = SkyCoord(10 * u.deg, 20 * u.deg, equinox="2011-01-01T00:00", frame="fk4")
tree = dict(coord=sc.transform_to("icrs"))
def check_asdf(asdffile):
assert hasattr(asdffile["coord"], "equinox")
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
def test_skycoord_2d_obstime(tmpdir):
    sc = SkyCoord(
        [1, 2],
        [3, 4],
        [5, 6],
        unit="deg,deg,m",
        frame="fk4",
        obstime=["J1990.5", "J1991.5"],
    )
tree = dict(coord=sc)
assert_roundtrip_tree(tree, tmpdir)
|
b0a7da3e10aa18e06d9f09a3c15d29321aef32a65ba594e59fa920f0d9f271c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
import astropy.units as u
from astropy.coordinates import Angle, Latitude, Longitude
def test_angle(tmpdir):
tree = {"angle": Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {"angle": Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {"angle": Longitude(-100, u.deg, wrap_angle=180 * u.deg)}
assert_roundtrip_tree(tree, tmpdir)
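# Illustrative sketch (an addition, not part of the original suite): with a
# 180 deg wrap angle, longitude values are mapped into [-180, 180) deg.
def _example_wrap_angle():
    lon = Longitude(200, u.deg, wrap_angle=180 * u.deg)
    assert lon.value == -160.0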
|
5a1dd1dabd0c051701f83c8df28cd64d840b647984cc3d92e94972091f0e75e9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
from numpy.random import randint, random
import astropy.coordinates.representation as r
import astropy.units as u
from astropy.coordinates import Angle
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
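# For example (illustrative only): CartesianRepresentation declares plain
# Quantity attributes and so receives km values from the fixture, while
# SphericalRepresentation's lon/lat are Angle subclasses and receive degrees:
#
#   r.CartesianRepresentation(x=x, y=y, z=z)                 # x, y, z in u.km
#   r.SphericalRepresentation(lon=lon, lat=lat, distance=d)  # lon/lat in u.deg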
def test_representations(tmpdir, representation):
tree = {"representation": representation}
assert_roundtrip_tree(tree, tmpdir)
|
643301497829583fa9c3cecf978c746a96eee714f56694e0d83241564fe8eb7b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.tests.helper import assert_quantity_allclose
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
def test_scalar_spectralcoord(tmpdir):
sc = SpectralCoord(565 * u.nm)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(asdffile["spectralcoord"].quantity, 565 * u.nm)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_vector_spectralcoord(tmpdir):
sc = SpectralCoord([100, 200, 300] * u.GHz)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(
asdffile["spectralcoord"].quantity, [100, 200, 300] * u.GHz
)
assert_roundtrip_tree(
tree, tmpdir, asdf_check_func=check, tree_match_func=assert_quantity_allclose
)
@pytest.mark.filterwarnings("ignore:No velocity")
def test_spectralcoord_with_obstarget(tmpdir):
sc = SpectralCoord(
10 * u.GHz,
observer=ICRS(1 * u.km, 2 * u.km, 3 * u.km, representation_type="cartesian"),
target=Galactic(10 * u.deg, 20 * u.deg, distance=30 * u.pc),
)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(asdffile["spectralcoord"].quantity, 10 * u.GHz)
assert isinstance(asdffile["spectralcoord"].observer, ICRS)
assert isinstance(asdffile["spectralcoord"].target, Galactic)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
|
6620b071c7f4aa5ca087e15c51c8e63a36123c05d3d51362bd09ff79ee8b35ba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests import helpers
from astropy import units as u
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert ff.tree["unit"].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert ff.tree["unit"].is_equivalent(u.Ry)
|
5f519be11723ce07716387f1a17436491c77fa6191cccdd8ee72cdb4f5908209 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import io
from asdf.tests import helpers
from astropy import units
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert (ff.tree["quantity"] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert (ff.tree["quantity"] == quantity).all()
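# Usage sketch (assumed semantics of the helper above): yaml_to_asdf embeds the
# YAML fragment in a minimal ASDF file, so a scalar check would read as:
#
#   yaml = """
#   quantity: !unit/quantity-1.1.0
#     value: 1.0
#     unit: m
#   """
#   roundtrip_quantity(yaml, units.Quantity(1.0, unit=units.m))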
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = f"""
quantity: !unit/quantity-1.1.0
value: {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x * 2.3081 for x in range(10)]
testunit = units.ampere
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1, 2, 3], [4, 5, 6]]
testunit = units.km
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{testval}
unit: {testunit}
"""
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
341aff3019476a3f035bce88f26f27e8ef8c19e76e1cb6a5686566d37c1542c8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.cosmology import Planck15
from astropy.cosmology.units import with_H0
from astropy.units import equivalencies as eq
asdf = pytest.importorskip("asdf", minversion="2.3.0.dev0")
from asdf.tests import helpers
def get_equivalencies():
"""
Return a list of example equivalencies for testing serialization.
"""
return [
eq.plate_scale(0.3 * u.deg / u.mm),
eq.pixel_scale(0.5 * u.deg / u.pix),
eq.pixel_scale(100.0 * u.pix / u.cm),
eq.spectral_density(350 * u.nm, factor=2),
eq.spectral_density(350 * u.nm),
eq.spectral(),
eq.brightness_temperature(500 * u.GHz),
eq.brightness_temperature(500 * u.GHz, beam_area=23 * u.sr),
eq.temperature_energy(),
eq.temperature(),
eq.thermodynamic_temperature(300 * u.Hz),
eq.thermodynamic_temperature(140 * u.GHz, Planck15.Tcmb0),
eq.beam_angular_area(3 * u.sr),
eq.mass_energy(),
eq.molar_mass_amu(),
eq.doppler_relativistic(2 * u.m),
eq.doppler_optical(2 * u.nm),
eq.doppler_radio(2 * u.Hz),
eq.parallax(),
eq.logarithmic(),
eq.dimensionless_angles(),
eq.spectral() + eq.temperature(),
(
eq.spectral_density(35 * u.nm)
+ eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)
),
(
eq.spectral()
+ eq.spectral_density(35 * u.nm)
+ eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)
),
with_H0(),
]
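# For context (illustrative, not exercised by the roundtrip itself): an
# equivalency extends ``.to()`` conversions between otherwise-incompatible
# units, e.g.
#
#   (500 * u.nm).to(u.THz, equivalencies=eq.spectral())  # ~599.58 THz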
@pytest.mark.parametrize("equiv", get_equivalencies())
@pytest.mark.filterwarnings(
"ignore:`with_H0` is deprecated from `astropy.units.equivalencies` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.with_H0` instead."
)
def test_equivalencies(tmpdir, equiv):
tree = {"equiv": equiv}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
7dc1449fc36a5fb3873f47a8dc3eaf13ccdbb251449df52cd03ab83e2de723a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
from astropy.nddata import CCDData
from astropy.table import Table
def test_table_read_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
Table.read.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.read(format='fits') documentation" in doc
assert "hdu : int or str, optional" in doc
def test_table_read_help_ascii():
"""
Test dynamically created documentation help via the I/O registry for 'ascii'.
"""
out = StringIO()
Table.read.help("ascii", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.read(format='ascii') documentation" in doc
assert "delimiter : str" in doc
assert "ASCII reader 'ascii' details" in doc
assert "Character-delimited table with a single header line" in doc
def test_table_write_help_hdf5():
"""
Test dynamically created documentation help via the I/O registry for 'hdf5'.
"""
out = StringIO()
Table.write.help("hdf5", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='hdf5') documentation" in doc
assert "Write a Table object to an HDF5 file" in doc
assert "compression : bool or str or int" in doc
def test_list_formats():
"""
Test getting list of available formats
"""
out = StringIO()
CCDData.write.list_formats(out)
output = out.getvalue()
assert (
output
== """\
Format Read Write Auto-identify
------ ---- ----- -------------
  fits  Yes   Yes           Yes"""
)
def test_table_write_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
Table.write.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='fits') documentation" in doc
assert "Write a Table object to a FITS file" in doc
def test_table_write_help_no_format():
"""
Test dynamically created documentation help via the I/O registry for no
format provided.
"""
out = StringIO()
Table.write.help(out=out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" in doc
assert "The available built-in formats" in doc
def test_table_read_help_no_format():
"""
    Test dynamically created documentation help via the I/O registry for no
format provided.
"""
out = StringIO()
Table.read.help(out=out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" in doc
assert "The available built-in formats" in doc
def test_ccddata_write_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
CCDData.write.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "CCDData.write(format='fits') documentation" in doc
assert "Write CCDData object to FITS file" in doc
assert "key_uncertainty_type : str, optional" in doc
def test_ccddata_read_help_fits():
"""Test dynamically created documentation help via the I/O registry for
CCDData 'fits'.
"""
out = StringIO()
CCDData.read.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "CCDData.read(format='fits') documentation" in doc
assert "Generate a CCDData object from a FITS file" in doc
assert "hdu_uncertainty : str or None, optional" in doc
def test_table_write_help_jsviewer():
"""
Test dynamically created documentation help via the I/O registry for
'jsviewer'.
"""
out = StringIO()
Table.write.help("jsviewer", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='jsviewer') documentation" in doc
|
d0c64ca2c1bc548585b63fc7fd93f30df002a727b4be9d32c5ee132fd888d17b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test :mod:`astropy.io.registry`.
.. todo::
Don't rely on Table for tests
"""
import os
from collections import Counter
from copy import deepcopy
from io import StringIO
import numpy as np
import pytest
import astropy.units as u
from astropy.io import registry as io_registry
from astropy.io.registry import (
IORegistryError,
UnifiedInputRegistry,
UnifiedIORegistry,
UnifiedOutputRegistry,
compat,
)
from astropy.io.registry.base import _UnifiedIORegistryBase
from astropy.io.registry.compat import default_registry
from astropy.table import Table
###############################################################################
# pytest setup and fixtures
class UnifiedIORegistryBaseSubClass(_UnifiedIORegistryBase):
"""Non-abstract subclass of UnifiedIORegistryBase for testing."""
def get_formats(self, data_class=None):
return None
class EmptyData:
"""
Thing that can read and write.
    Note that ``read`` and ``write`` here are the compatibility methods, which
    accept the kwarg ``registry``. This lets us avoid subclassing ``EmptyData``
    for each type of registry (read-only, ...) and use this class everywhere.
"""
read = classmethod(io_registry.read)
write = io_registry.write
class OtherEmptyData:
"""A different class with different I/O"""
read = classmethod(io_registry.read)
write = io_registry.write
def empty_reader(*args, **kwargs):
return EmptyData()
def empty_writer(table, *args, **kwargs):
return "status: success"
def empty_identifier(*args, **kwargs):
return True
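# Illustrative sketch (an addition, not part of the original module; the
# _example_ name is hypothetical): wiring the helpers above into a standalone
# registry, mirroring the patterns exercised by the tests below.
def _example_registry_usage():
    reg = UnifiedIORegistry()
    reg.register_reader("empty", EmptyData, empty_reader)
    reg.register_writer("empty", EmptyData, empty_writer)
    reg.register_identifier("empty", EmptyData, empty_identifier)
    data = EmptyData.read(format="empty", registry=reg)  # -> EmptyData instance
    return data.write(format="empty", registry=reg)  # -> "status: success"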
@pytest.fixture
def fmtcls1():
return ("test1", EmptyData)
@pytest.fixture
def fmtcls2():
return ("test2", EmptyData)
@pytest.fixture(params=["test1", "test2"])
def fmtcls(request):
yield (request.param, EmptyData)
@pytest.fixture
def original():
ORIGINAL = {}
ORIGINAL["readers"] = deepcopy(default_registry._readers)
ORIGINAL["writers"] = deepcopy(default_registry._writers)
ORIGINAL["identifiers"] = deepcopy(default_registry._identifiers)
return ORIGINAL
###############################################################################
def test_fmcls1_fmtcls2(fmtcls1, fmtcls2):
"""Just check a fact that we rely on in other tests."""
assert fmtcls1[1] is fmtcls2[1]
def test_IORegistryError():
with pytest.raises(IORegistryError, match="just checking"):
raise IORegistryError("just checking")
class TestUnifiedIORegistryBase:
"""Test :class:`astropy.io.registry.UnifiedIORegistryBase`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistryBaseSubClass
@pytest.fixture
def registry(self):
"""I/O registry. Cleaned before and after each function."""
registry = self._cls()
HAS_READERS = hasattr(registry, "_readers")
HAS_WRITERS = hasattr(registry, "_writers")
# copy and clear original registry
ORIGINAL = {}
ORIGINAL["identifiers"] = deepcopy(registry._identifiers)
registry._identifiers.clear()
if HAS_READERS:
ORIGINAL["readers"] = deepcopy(registry._readers)
registry._readers.clear()
if HAS_WRITERS:
ORIGINAL["writers"] = deepcopy(registry._writers)
registry._writers.clear()
yield registry
registry._identifiers.clear()
registry._identifiers.update(ORIGINAL["identifiers"])
if HAS_READERS:
registry._readers.clear()
registry._readers.update(ORIGINAL["readers"])
if HAS_WRITERS:
registry._writers.clear()
registry._writers.update(ORIGINAL["writers"])
# ===========================================
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
# defaults
assert registry.get_formats() is None
# (kw)args don't matter
assert registry.get_formats(data_class=24) is None
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
# TODO! figure out what can be tested
with registry.delay_doc_updates(EmptyData):
registry.register_identifier(*fmtcls1, empty_identifier)
def test_register_identifier(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_identifier()``."""
# initial check it's not registered
assert fmtcls1 not in registry._identifiers
assert fmtcls2 not in registry._identifiers
# register
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls2, empty_identifier)
assert fmtcls1 in registry._identifiers
assert fmtcls2 in registry._identifiers
def test_register_identifier_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_identifier()`` twice."""
fmt, cls = fmtcls
registry.register_identifier(fmt, cls, empty_identifier)
with pytest.raises(IORegistryError) as exc:
registry.register_identifier(fmt, cls, empty_identifier)
assert (
str(exc.value) == f"Identifier for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_identifier_force(self, registry, fmtcls1):
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls1, empty_identifier, force=True)
assert fmtcls1 in registry._identifiers
# -----------------------
def test_unregister_identifier(self, registry, fmtcls1):
"""Test ``registry.unregister_identifier()``."""
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
registry.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_unregister_identifier_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_identifier()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_identifier(fmt, cls)
assert (
str(exc.value)
== f"No identifier defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_identify_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), {})
# test no formats to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# ===========================================
# Compat tests
def test_compat_register_identifier(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._identifiers
compat.register_identifier(*fmtcls1, empty_identifier, registry=registry)
assert fmtcls1 in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
try:
compat.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._identifiers
finally:
default_registry._identifiers.pop(fmtcls1)
def test_compat_unregister_identifier(self, registry, fmtcls1):
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
compat.unregister_identifier(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
default_registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in default_registry._identifiers
compat.unregister_identifier(*fmtcls1)
            assert fmtcls1 not in default_registry._identifiers
def test_compat_identify_format(self, registry, fmtcls1):
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), dict())
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
formats = compat.identify_format(*args, registry=registry)
assert fmt in formats
# without registry specified it becomes default_registry
if registry is not default_registry:
try:
default_registry.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
formats = compat.identify_format(*args)
assert fmt in formats
finally:
default_registry.unregister_identifier(*fmtcls1)
@pytest.mark.skip("TODO!")
def test_compat_get_formats(self, registry, fmtcls1):
assert False
@pytest.mark.skip("TODO!")
def test_compat_delay_doc_updates(self, registry, fmtcls1):
assert False
class TestUnifiedInputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedInputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedInputRegistry
# ===========================================
def test_inherited_read_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _read():
return EmptyData()
def _read1():
return Child1()
# check that reader gets inherited
registry.register_reader("test", EmptyData, _read)
assert registry.get_reader("test", Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
registry.register_reader("test", Child1, _read1)
assert registry.get_reader("test", Child2) is _read1
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
with registry.delay_doc_updates(EmptyData):
registry.register_reader("test", EmptyData, empty_reader)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.read.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 1
iread = docs[ihd].index("Read") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert docs[-1][ifmt : ifmt + 5] == "test"
assert docs[-1][iread : iread + 3] != "Yes"
# now test it's updated
docs = EmptyData.read.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 2
iread = docs[ihd].index("Read") + 1
assert docs[-2][ifmt : ifmt + 4] == "test"
assert docs[-2][iread : iread + 3] == "Yes"
def test_identify_read_format(self, registry):
"""Test ``registry.identify_format()``."""
args = ("read", EmptyData, None, None, (None,), dict())
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # whether a reader is registered doesn't matter; the identifier returns True for everything
registry.register_identifier("test", EmptyData, empty_identifier)
formats = registry.identify_format(*args)
assert "test" in formats
# -----------------------
def test_register_reader(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_reader()``."""
# initial check it's not registered
assert fmtcls1 not in registry._readers
assert fmtcls2 not in registry._readers
# register
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls2, empty_reader)
assert fmtcls1 in registry._readers
assert fmtcls2 in registry._readers
assert registry._readers[fmtcls1] == (empty_reader, 0) # (f, priority)
assert registry._readers[fmtcls2] == (empty_reader, 0) # (f, priority)
def test_register_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
registry.register_reader(fmt, cls, empty_reader)
with pytest.raises(IORegistryError) as exc:
registry.register_reader(fmt, cls, empty_reader)
assert (
str(exc.value) == f"Reader for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_reader_force(self, registry, fmtcls1):
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls1, empty_reader, force=True)
assert fmtcls1 in registry._readers
def test_register_readers_with_same_name_on_different_classes(self, registry):
        # No errors should be generated if the same name is registered for
        # different objects... but this failed under Python 3
registry.register_reader("test", EmptyData, lambda: EmptyData())
registry.register_reader("test", OtherEmptyData, lambda: OtherEmptyData())
t = EmptyData.read(format="test", registry=registry)
assert isinstance(t, EmptyData)
tbl = OtherEmptyData.read(format="test", registry=registry)
assert isinstance(tbl, OtherEmptyData)
# -----------------------
def test_unregister_reader(self, registry, fmtcls1):
"""Test ``registry.unregister_reader()``."""
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
registry.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_unregister_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.unregister_reader(*fmtcls1)
assert (
str(exc.value)
== f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_get_reader(self, registry, fmtcls):
"""Test ``registry.get_reader()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError):
registry.get_reader(fmt, cls)
registry.register_reader(fmt, cls, empty_reader)
reader = registry.get_reader(fmt, cls)
assert reader is empty_reader
def test_get_reader_invalid(self, registry, fmtcls):
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.get_reader(fmt, cls)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_read_noformat(self, registry, fmtcls1):
"""Test ``registry.read()`` when there isn't a reader."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary_file(self, tmp_path, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._readers.update(original["readers"])
testfile = tmp_path / "foo.example"
with open(testfile, "w") as f:
f.write("Hello world")
with pytest.raises(IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_toomanyformats(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
cls.read(registry=registry)
assert str(exc.value) == f"Format is ambiguous - options are: {fmt1}, {fmt2}"
def test_read_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
counter = Counter()
def counting_reader1(*args, **kwargs):
counter[fmt1] += 1
return cls()
def counting_reader2(*args, **kwargs):
counter[fmt2] += 1
return cls()
registry.register_reader(fmt1, cls, counting_reader1, priority=1)
registry.register_reader(fmt2, cls, counting_reader2, priority=2)
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
cls.read(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
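    # Note (an added remark, consistent with the assertions above): when
    # several registered formats identify the same input, the reader with the
    # highest ``priority`` value wins, so only ``counting_reader2`` runs.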
def test_read_format_noreader(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_read_identifier(self, tmp_path, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(
fmt1, cls, lambda o, path, fileobj, *x, **y: path.endswith("a")
)
registry.register_identifier(
fmt2, cls, lambda o, path, fileobj, *x, **y: path.endswith("b")
)
# Now check that we got past the identifier and are trying to get
# the reader. The registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmp_path / "testfile.a"
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt1}' and class '{cls.__name__}'"
)
filename = tmp_path / "testfile.b"
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_read_valid_return(self, registry, fmtcls):
fmt, cls = fmtcls
registry.register_reader(fmt, cls, empty_reader)
t = cls.read(format=fmt, registry=registry)
assert isinstance(t, cls)
def test_read_non_existing_unknown_ext(self, fmtcls1):
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
            fmtcls1[1].read("non-existing-file-with-unknown.ext")
def test_read_directory(self, tmp_path, registry, fmtcls1):
"""
Regression test for a bug that caused the I/O registry infrastructure to
not work correctly for datasets that are represented by folders as
opposed to files, when using the descriptors to add read/write methods.
"""
_, cls = fmtcls1
registry.register_identifier(
"test_folder_format", cls, lambda o, *x, **y: o == "read"
)
registry.register_reader("test_folder_format", cls, empty_reader)
filename = tmp_path / "folder_dataset"
filename.mkdir()
# With the format explicitly specified
dataset = cls.read(filename, format="test_folder_format", registry=registry)
assert isinstance(dataset, cls)
# With the auto-format identification
dataset = cls.read(filename, registry=registry)
assert isinstance(dataset, cls)
# ===========================================
# Compat tests
def test_compat_register_reader(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._readers
compat.register_reader(*fmtcls1, empty_reader, registry=registry)
assert fmtcls1 in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
try:
compat.register_reader(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._readers
finally:
default_registry._readers.pop(fmtcls1)
def test_compat_unregister_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
compat.unregister_reader(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
default_registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in default_registry._readers
compat.unregister_reader(*fmtcls1)
            assert fmtcls1 not in default_registry._readers
def test_compat_get_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1, registry=registry)
assert reader is empty_reader
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1)
assert reader is empty_reader
default_registry.unregister_reader(*fmtcls1)
def test_compat_read(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt, registry=registry)
assert isinstance(t, cls)
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt)
assert isinstance(t, cls)
default_registry.unregister_reader(*fmtcls1)
class TestUnifiedOutputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedOutputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedOutputRegistry
# ===========================================
def test_inherited_write_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _write():
return EmptyData()
def _write1():
return Child1()
# check that writer gets inherited
registry.register_writer("test", EmptyData, _write)
assert registry.get_writer("test", Child2) is _write
# check that nearest ancestor is identified
# (i.e. that the writer for Child2 is the registered method
# for Child1, and not Table)
registry.register_writer("test", Child1, _write1)
assert registry.get_writer("test", Child2) is _write1
# ===========================================
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
fmt, cls = fmtcls1
with registry.delay_doc_updates(EmptyData):
registry.register_writer(*fmtcls1, empty_writer)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.write.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format")
iwrite = docs[ihd].index("Write") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert fmt in docs[-1][ifmt : ifmt + len(fmt) + 1]
assert docs[-1][iwrite : iwrite + 3] != "Yes"
# now test it's updated
docs = EmptyData.write.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 1
iwrite = docs[ihd].index("Write") + 2
assert fmt in docs[-2][ifmt : ifmt + len(fmt) + 1]
assert docs[-2][iwrite : iwrite + 3] == "Yes"
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_identify_write_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = ("write", cls, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # whether a writer is registered doesn't matter; the identifier returns True for everything
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# -----------------------
def test_register_writer(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_writer()``."""
# initial check it's not registered
assert fmtcls1 not in registry._writers
assert fmtcls2 not in registry._writers
# register
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls2, empty_writer)
assert fmtcls1 in registry._writers
assert fmtcls2 in registry._writers
def test_register_writer_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_writer()`` twice."""
fmt, cls = fmtcls
registry.register_writer(fmt, cls, empty_writer)
with pytest.raises(IORegistryError) as exc:
registry.register_writer(fmt, cls, empty_writer)
assert (
str(exc.value) == f"Writer for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_writer_force(self, registry, fmtcls1):
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls1, empty_writer, force=True)
assert fmtcls1 in registry._writers
# -----------------------
def test_unregister_writer(self, registry, fmtcls1):
"""Test ``registry.unregister_writer()``."""
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in registry._writers
def test_unregister_writer_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_writer()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_writer(fmt, cls)
assert (
str(exc.value)
== f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_get_writer(self, registry, fmtcls1):
"""Test ``registry.get_writer()``."""
with pytest.raises(IORegistryError):
registry.get_writer(*fmtcls1)
registry.register_writer(*fmtcls1, empty_writer)
writer = registry.get_writer(*fmtcls1)
assert writer is empty_writer
def test_get_writer_invalid(self, registry, fmtcls1):
"""Test invalid ``registry.get_writer()``."""
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.get_writer(fmt, cls)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_write_noformat(self, registry, fmtcls1):
"""Test ``registry.write()`` when there isn't a writer."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary_file(self, tmp_path, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._writers.update(original["writers"])
testfile = tmp_path / "foo.example"
with pytest.raises(IORegistryError) as exc:
Table().write(testfile, registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_toomanyformats(self, registry, fmtcls1, fmtcls2):
registry.register_identifier(*fmtcls1, lambda o, *x, **y: True)
registry.register_identifier(*fmtcls2, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert (
str(exc.value)
== f"Format is ambiguous - options are: {fmtcls1[0]}, {fmtcls2[0]}"
)
def test_write_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls1 = fmtcls1
fmt2, cls2 = fmtcls2
counter = Counter()
def counting_writer1(*args, **kwargs):
counter[fmt1] += 1
def counting_writer2(*args, **kwargs):
counter[fmt2] += 1
registry.register_writer(fmt1, cls1, counting_writer1, priority=1)
registry.register_writer(fmt2, cls2, counting_writer2, priority=2)
registry.register_identifier(fmt1, cls1, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls2, lambda o, *x, **y: True)
cls1().write(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_write_format_nowriter(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_write_identifier(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: x[0].startswith("a"))
registry.register_identifier(fmt2, cls, lambda o, *x, **y: x[0].startswith("b"))
        # Now check that we got past the identifier and are trying to get
        # the writer. The registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(IORegistryError) as exc:
cls().write("abc", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write("bac", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_write_return(self, registry, fmtcls1):
"""Most writers will return None, but other values are not forbidden."""
fmt, cls = fmtcls1
registry.register_writer(fmt, cls, empty_writer)
res = cls.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# ===========================================
# Compat tests
def test_compat_register_writer(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._writers
compat.register_writer(*fmtcls1, empty_writer, registry=registry)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
try:
compat.register_writer(*fmtcls1, empty_writer)
except Exception:
pass
else:
assert fmtcls1 in default_registry._writers
finally:
default_registry._writers.pop(fmtcls1)
def test_compat_unregister_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
compat.unregister_writer(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._writers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
compat.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_get_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
writer = compat.get_writer(*fmtcls1, registry=registry)
assert writer is empty_writer
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
writer = compat.get_writer(*fmtcls1)
assert writer is empty_writer
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_write(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
res = compat.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
res = compat.write(cls(), format=fmt)
assert res == "status: success"
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
class TestUnifiedIORegistry(TestUnifiedInputRegistry, TestUnifiedOutputRegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistry
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
# -----------------------
def test_identifier_origin(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: o == "read")
registry.register_identifier(fmt2, cls, lambda o, *x, **y: o == "write")
registry.register_reader(fmt1, cls, empty_reader)
registry.register_writer(fmt2, cls, empty_writer)
        # Each call below should identify exactly one format (no ambiguity error)
cls.read(registry=registry)
cls().write(registry=registry)
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt2, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt1, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
class TestDefaultRegistry(TestUnifiedIORegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = lambda *args: default_registry
# =============================================================================
# Test compat
# much of this is already tested above since EmptyData uses io_registry.X(),
# which are the compat methods.
def test_dir():
"""Test all the compat methods are in the directory"""
dc = dir(compat)
for n in compat.__all__:
assert n in dc
def test_getattr():
for n in compat.__all__:
assert hasattr(compat, n)
with pytest.raises(AttributeError, match="module 'astropy.io.registry.compat'"):
compat.this_is_definitely_not_in_this_module
# =============================================================================
# Table tests
def test_read_basic_table():
registry = Table.read._registry
data = np.array(
list(zip([1, 2, 3], ["a", "b", "c"])), dtype=[("A", int), ("B", "|U1")]
)
try:
registry.register_reader("test", Table, lambda x: Table(x))
except Exception:
pass
else:
t = Table.read(data, format="test")
assert t.keys() == ["A", "B"]
for i in range(3):
assert t["A"][i] == data["A"][i]
assert t["B"][i] == data["B"][i]
finally:
registry._readers.pop("test", None)
class TestSubclass:
"""
Test using registry with a Table sub-class
"""
@pytest.fixture(autouse=True)
def registry(self):
"""I/O registry. Not cleaned."""
yield
def test_read_table_subclass(self):
class MyTable(Table):
pass
data = ["a b", "1 2"]
mt = MyTable.read(data, format="ascii")
t = Table.read(data, format="ascii")
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(mt) is MyTable
def test_write_table_subclass(self):
buffer = StringIO()
class MyTable(Table):
pass
mt = MyTable([[1], [2]], names=["a", "b"])
mt.write(buffer, format="ascii")
assert buffer.getvalue() == os.linesep.join(["a b", "1 2", ""])
def test_read_table_subclass_with_columns_attributes(self, tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/7181"""
class MTable(Table):
pass
mt = MTable([[1, 2.5]], names=["a"])
mt["a"].unit = u.m
mt["a"].format = ".4f"
mt["a"].description = "hello"
testfile = tmp_path / "junk.fits"
mt.write(testfile, overwrite=True)
t = MTable.read(testfile)
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(t) is MTable
assert t["a"].unit == u.m
assert t["a"].format == "{:13.4f}"
assert t["a"].description == "hello"
|
1f9ab14f8b4e8092692004c06613b5e8caada0cdc7f2d2fbc66718f7758eb6e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# LOCAL
from astropy.io.votable import converters, exceptions, tree
def test_reraise():
def fail():
raise RuntimeError("This failed")
try:
try:
fail()
except RuntimeError as e:
exceptions.vo_reraise(e, additional="From here")
except RuntimeError as e:
assert "From here" in str(e)
else:
assert False
def test_parse_vowarning():
config = {"verify": "exception", "filename": "foo.xml"}
pos = (42, 64)
with pytest.warns(exceptions.W47) as w:
field = tree.Field(None, name="c", datatype="char", config=config, pos=pos)
converters.get_converter(field, config=config, pos=pos)
parts = exceptions.parse_vowarning(str(w[0].message))
match = {
"number": 47,
"is_exception": False,
"nchar": 64,
"warning": "W47",
"is_something": True,
"message": "Missing arraysize indicates length 1",
"doc_url": "io/votable/api_exceptions.html#w47",
"nline": 42,
"is_warning": True,
}
assert parts == match
def test_suppress_warnings():
cfg = {}
warn = exceptions.W01("foo")
with exceptions.conf.set_temp("max_warnings", 2):
with pytest.warns(exceptions.W01) as record:
exceptions._suppressed_warning(warn, cfg)
assert len(record) == 1
assert "suppressing" not in str(record[0].message)
with pytest.warns(exceptions.W01, match="suppressing"):
exceptions._suppressed_warning(warn, cfg)
exceptions._suppressed_warning(warn, cfg)
assert cfg["_warning_counts"][exceptions.W01] == 3
assert exceptions.conf.max_warnings == 10
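# Illustrative note (an added remark matching the assertions above): once the
# per-warning count reaches ``max_warnings``, ``_suppressed_warning`` emits one
# final "suppressing" notice and then drops further repeats silently.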
|
985f26049d2348bf90536d666c584f01dbc2492cc4f7ea5e8df3e1cedbb031fa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A set of tests for the util.py module
"""
import pytest
from astropy.io.votable import util
def test_range_list():
assert util.coerce_range_list_param((5,)) == ("5.0", 1)
def test_range_list2():
assert util.coerce_range_list_param((5e-7, 8e-7)) == ("5e-07,8e-07", 2)
def test_range_list3():
assert util.coerce_range_list_param((5e-7, 8e-7, "FOO")) == ("5e-07,8e-07;FOO", 3)
def test_range_list4a():
with pytest.raises(ValueError):
util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO")
)
def test_range_list4():
assert util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"), numeric=False
) == ("5e-07,/8e-07,4/,4/5,J;FOO", 6)
def test_range_list5():
with pytest.raises(ValueError):
util.coerce_range_list_param(("FOO",))
def test_range_list6():
with pytest.raises(ValueError):
print(util.coerce_range_list_param((5, "FOO"), util.stc_reference_frames))
def test_range_list7():
assert util.coerce_range_list_param(("J",), numeric=False) == ("J", 1)
def test_range_list8():
for s in [
"5.0",
"5e-07,8e-07",
"5e-07,8e-07;FOO",
"5e-07,/8e-07,4.0/,4.0/5.0;FOO",
"J",
]:
assert util.coerce_range_list_param(s, numeric=False)[0] == s
def test_range_list9a():
with pytest.raises(ValueError):
util.coerce_range_list_param("52,-27.8;FOO", util.stc_reference_frames)
def test_range_list9():
assert util.coerce_range_list_param("52,-27.8;GALACTIC", util.stc_reference_frames)
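# Worked example (restating test_range_list4 above): a mixed tuple of scalars,
# open/closed ranges, and string qualifiers is rendered as a range-list string:
#
#   (5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO")
#       -> ("5e-07,/8e-07,4/,4/5,J;FOO", 6)   # with numeric=False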
|
333b4f6588c53142594a3ae9798396833d714ef39f836e851917f50e00faf068 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.io.votable import parse
from astropy.utils.data import get_pkg_data_filename
def test_resource_groups():
# Read the VOTABLE
votable = parse(get_pkg_data_filename("data/resource_groups.xml"))
resource = votable.resources[0]
groups = resource.groups
params = resource.params
    # Test that params declared inside groups do not also appear at the resource level
assert len(groups[0].entries) == 1
assert groups[0].entries[0].name == "ID"
assert len(params) == 2
assert params[0].name == "standardID"
assert params[1].name == "accessURL"
|
e97291eb46555b7866d932807e2ab3313f44b1936c4f27993bee13150d650099 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import gzip
import io
import pathlib
import sys
from unittest import mock
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_array_equal
from astropy.io.votable import tree
from astropy.io.votable.exceptions import W39, VOTableSpecError, VOWarning
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
# Determine the kind of float formatting in this build of Python
if hasattr(sys, "float_repr_style"):
legacy_float_repr = sys.float_repr_style == "legacy"
else:
legacy_float_repr = sys.platform.startswith("win")
def assert_validate_schema(filename, version):
if sys.platform.startswith("win"):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, "File did not validate against VOTable schema"
def test_parse_single_table():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table = parse_single_table(get_pkg_data_filename("data/regression.xml"))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table2 = parse_single_table(
get_pkg_data_filename("data/regression.xml"), table_number=1
)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
def test_parse_single_table3():
with pytest.raises(IndexError):
parse_single_table(get_pkg_data_filename("data/regression.xml"), table_number=3)
def _test_regression(tmp_path, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename("data/regression.xml"),
_debug_python_based_parser=_python_based,
)
table = votable.get_first_table()
dtypes = [
(("string test", "string_test"), "|O8"),
(("fixed string test", "string_test_2"), "<U10"),
("unicode_test", "|O8"),
(("unicode test", "fixed_unicode_test"), "<U10"),
(("string array test", "string_array_test"), "<U4"),
("unsignedByte", "|u1"),
("short", "<i2"),
("int", "<i4"),
("long", "<i8"),
("double", "<f8"),
("float", "<f4"),
("array", "|O8"),
("bit", "|b1"),
("bitarray", "|b1", (3, 2)),
("bitvararray", "|O8"),
("bitvararray2", "|O8"),
("floatComplex", "<c8"),
("doubleComplex", "<c16"),
("doubleComplexArray", "|O8"),
("doubleComplexArrayFixed", "<c16", (2,)),
("boolean", "|b1"),
("booleanArray", "|b1", (4,)),
("nulls", "<i4"),
("nulls_array", "<i4", (2, 2)),
("precision1", "<f8"),
("precision2", "<f8"),
("doublearray", "|O8"),
("bitarray2", "|b1", (16,)),
]
if sys.byteorder == "big":
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace("<", ">")
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(
str(tmp_path / "regression.tabledata.xml"),
_debug_python_based_parser=_python_based,
)
assert_validate_schema(str(tmp_path / "regression.tabledata.xml"), votable.version)
if binary_mode == 1:
votable.get_first_table().format = "binary"
votable.version = "1.1"
elif binary_mode == 2:
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
votable.version = "1.3"
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmp_path / "regression.binary.xml"), votable.version)
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = "tabledata"
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
assert_validate_schema(
str(tmp_path / "regression.bin.tabledata.xml"), votable.version
)
with open(
get_pkg_data_filename(
f"data/regression.bin.tabledata.truth.{votable.version}.xml"
),
encoding="utf-8",
) as fd:
truth = fd.readlines()
with open(str(tmp_path / "regression.bin.tabledata.xml"), encoding="utf-8") as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml.gz"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
with gzip.GzipFile(str(tmp_path / "regression.bin.tabledata.xml.gz"), "rb") as gzfd:
output = gzfd.readlines()
output = [x.decode("utf-8").rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail("legacy_float_repr")
def test_regression(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_python_based_parser(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, True)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_binary2(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False, 2)
class TestFixups:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.table = parse(
get_pkg_data_filename("data/regression.xml")
).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array["string_test_2"], self.array["fixed string test"])
class TestReferences:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == "boolean"
assert fieldref.get_ref().datatype == "boolean"
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == "INPUT"
assert paramref.get_ref().datatype == "float"
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
columns = ["string_test", "unsignedByte", "bitarray"]
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
def test_select_columns_by_name():
columns = ["string_test", "unsignedByte", "bitarray"]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
class TestParse:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array["string_test"].dtype.type, np.object_)
assert_array_equal(
self.array["string_test"],
["String & test", "String & test", "XXXX", "", ""],
)
def test_fixed_string_test(self):
assert issubclass(self.array["string_test_2"].dtype.type, np.unicode_)
assert_array_equal(
self.array["string_test_2"], ["Fixed stri", "0123456789", "XXXX", "", ""]
)
def test_unicode_test(self):
assert issubclass(self.array["unicode_test"].dtype.type, np.object_)
assert_array_equal(
self.array["unicode_test"],
["Ceçi n'est pas un pipe", "வணக்கம்", "XXXX", "", ""],
)
def test_fixed_unicode_test(self):
assert issubclass(self.array["fixed_unicode_test"].dtype.type, np.unicode_)
assert_array_equal(
self.array["fixed_unicode_test"],
["Ceçi n'est", "வணக்கம்", "0123456789", "", ""],
)
def test_unsignedByte(self):
assert issubclass(self.array["unsignedByte"].dtype.type, np.uint8)
assert_array_equal(self.array["unsignedByte"], [128, 255, 0, 255, 255])
assert not np.any(self.mask["unsignedByte"])
def test_short(self):
assert issubclass(self.array["short"].dtype.type, np.int16)
assert_array_equal(self.array["short"], [4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask["short"])
def test_int(self):
assert issubclass(self.array["int"].dtype.type, np.int32)
assert_array_equal(
self.array["int"], [268435456, 2147483647, -268435456, 268435455, 123456789]
)
assert_array_equal(self.mask["int"], [False, False, False, False, True])
def test_long(self):
assert issubclass(self.array["long"].dtype.type, np.int64)
assert_array_equal(
self.array["long"],
[
922337203685477,
123456789,
-1152921504606846976,
1152921504606846975,
123456789,
],
)
assert_array_equal(self.mask["long"], [False, True, False, False, True])
def test_double(self):
assert issubclass(self.array["double"].dtype.type, np.float64)
assert_array_equal(
self.array["double"], [8.9990234375, 0.0, np.inf, np.nan, -np.inf]
)
assert_array_equal(self.mask["double"], [False, False, False, True, False])
def test_float(self):
assert issubclass(self.array["float"].dtype.type, np.float32)
assert_array_equal(self.array["float"], [1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask["float"], [False, False, False, False, True])
def test_array(self):
assert issubclass(self.array["array"].dtype.type, np.object_)
match = [
[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]],
]
for a, b in zip(self.array["array"], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
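        # Variable-length cells are stored as object arrays, so each cell
        # carries its own mask; reach it through ``self.array.data``.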
assert self.array.data["array"][3].mask[0][0]
assert self.array.data["array"][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array["bit"].dtype.type, np.bool_)
assert_array_equal(self.array["bit"], [True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array["bitarray"].dtype.type, np.bool_)
assert self.array["bitarray"].shape == (5, 3, 2)
assert_array_equal(
self.array["bitarray"],
[
[[True, False], [True, True], [False, True]],
[[False, True], [False, False], [True, True]],
[[True, True], [True, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
],
)
def test_bitarray_mask(self):
assert_array_equal(
self.mask["bitarray"],
[
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[True, True], [True, True], [True, True]],
[[True, True], [True, True], [True, True]],
],
)
def test_bitvararray(self):
assert issubclass(self.array["bitvararray"].dtype.type, np.object_)
match = [
[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[],
[],
]
for a, b in zip(self.array["bitvararray"], match):
assert_array_equal(a, b)
match_mask = [
[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False,
False,
]
for a, b in zip(self.array["bitvararray"], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array["bitvararray2"].dtype.type, np.object_)
match = [
[],
[
[[False, True], [False, False], [True, False]],
[[True, False], [True, False], [True, False]],
],
[[[True, True], [True, True], [True, True]]],
[],
[],
]
for a, b in zip(self.array["bitvararray2"], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array["floatComplex"].dtype.type, np.complex64)
assert_array_equal(
self.array["floatComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + 0j, np.nan + 0j],
)
assert_array_equal(self.mask["floatComplex"], [True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array["doubleComplex"].dtype.type, np.complex128)
assert_array_equal(
self.array["doubleComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + (np.inf * 1j), np.nan + 0j],
)
assert_array_equal(self.mask["doubleComplex"], [True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array["doubleComplexArray"].dtype.type, np.object_)
assert [len(x) for x in self.array["doubleComplexArray"]] == [0, 2, 2, 0, 0]
def test_boolean(self):
assert issubclass(self.array["boolean"].dtype.type, np.bool_)
assert_array_equal(self.array["boolean"], [True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask["boolean"], [False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array["booleanArray"].dtype.type, np.bool_)
assert_array_equal(
self.array["booleanArray"],
[
[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False],
],
)
def test_boolean_array_mask(self):
assert_array_equal(
self.mask["booleanArray"],
[
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True],
],
)
def test_nulls(self):
assert_array_equal(self.array["nulls"], [0, -9, 2, -9, -9])
assert_array_equal(self.mask["nulls"], [False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(
self.array["nulls_array"],
[
[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]],
],
)
assert_array_equal(
self.mask["nulls_array"],
[
[[True, True], [True, True]],
[[False, False], [False, False]],
[[True, False], [True, False]],
[[False, True], [False, True]],
[[True, True], [True, True]],
],
)
def test_double_array(self):
assert issubclass(self.array["doublearray"].dtype.type, np.object_)
assert len(self.array["doublearray"][0]) == 0
assert_array_equal(
self.array["doublearray"][1], [0, 1, np.inf, -np.inf, np.nan, 0, -1]
)
assert_array_equal(
self.array.data["doublearray"][1].mask,
[False, False, False, False, False, False, True],
)
def test_bit_array2(self):
assert_array_equal(
self.array["bitarray2"][0],
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
True,
True,
True,
False,
False,
False,
False,
],
)
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"][0])
assert np.all(self.mask["bitarray2"][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id("J2000")
assert coosys.system == "eq_FK5"
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id("QUERY_STATUS")
assert info.value == "OK"
if self.votable.version != "1.1":
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..."
def test_repr(self):
assert "3 tables" in repr(self.votable)
assert (
repr(list(self.votable.iter_fields_and_params())[0])
== '<PARAM ID="awesome" arraysize="*" datatype="float" '
'name="INPUT" unit="deg" value="[0.0 0.0]"/>'
)
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == "[</>]"
class TestThroughTableData(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
def test_schema(self, tmp_path):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = tmp_path / "test_through_tabledata.xml"
with open(fn, "wb") as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, "1.1")
class TestThroughBinary(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.get_first_table().format = "binary"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask["bit"])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
class TestThroughBinary2(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.version = "1.3"
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
    # Now write the whole thing out (here to an in-memory buffer).
    # Note that we have to use the top-level votable file object.
out = io.StringIO()
votable.to_xml(out)
# https://github.com/astropy/astropy/issues/13341
@np.errstate(over="ignore")
def test_open_files():
for filename in get_pkg_data_filenames("data", pattern="*.xml"):
if filename.endswith("custom_datatype.xml") or filename.endswith(
"timesys_errors.xml"
):
continue
parse(filename)
def test_too_many_columns():
with pytest.raises(VOTableSpecError):
parse(get_pkg_data_filename("data/too_many_columns.xml.gz"))
def test_build_from_scratch(tmp_path):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
tree.Field(
votable, ID="filename", name="filename", datatype="char", arraysize="1"
),
tree.Field(
votable, ID="matrix", name="matrix", datatype="double", arraysize="2x2"
),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmp_path / "new_votable.xml"))
votable = parse(str(tmp_path / "new_votable.xml"))
table = votable.get_first_table()
assert_array_equal(
table.array.mask,
np.array(
[
(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]]),
],
dtype=[("filename", "?"), ("matrix", "?", (2, 2))],
),
)
def test_validate(test_path_object=False):
"""
    ``test_path_object`` is used by ``test_validate_path_object`` below so
    that the file can be passed as a ``pathlib.Path`` object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename("data/regression.xml")
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/validation.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
@mock.patch("subprocess.Popen")
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {"communicate.return_value": ("ok", "ko"), "returncode": 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename("data/empty_table.xml"), xmllint=True)
def test_validate_path_object():
"""
Validating when source is passed as path object. (#4412)
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
# W39: Bit values can not be masked
with pytest.warns(W39):
with open(tmp_path / "regression.compressed.xml", "wb") as fd:
votable.to_xml(fd, compressed=True, _astropy_version="testing")
with open(tmp_path / "regression.compressed.xml", "rb") as fd:
votable = parse(fd)
def test_from_scratch_example():
_run_test_from_scratch_example()
def _run_test_from_scratch_example():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == "test1.xml"
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename("data/regression.xml")
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == "win32":
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename("data/nonstandard_units.xml"))
assert isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename("data/nonstandard_units.xml"), unit_format="generic"
)
assert not isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = "t2"
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(
get_pkg_data_filename("data/no_resource.xml"), output, xmllint=False
)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/no_resource.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename("data/custom_datatype.xml"),
datatype_mapping={"bar": "int"},
)
table = votable.get_first_table()
assert table.array.dtype["foo"] == np.int32
def _timesys_tests(votable):
assert len(list(votable.iter_timesys())) == 4
timesys = votable.get_timesys_by_id("time_frame")
assert timesys.timeorigin == 2455197.5
assert timesys.timescale == "TCB"
assert timesys.refposition == "BARYCENTER"
timesys = votable.get_timesys_by_id("mjd_origin")
assert timesys.timeorigin == "MJD-origin"
assert timesys.timescale == "TDB"
assert timesys.refposition == "EMBARYCENTER"
timesys = votable.get_timesys_by_id("jd_origin")
assert timesys.timeorigin == "JD-origin"
assert timesys.timescale == "TT"
assert timesys.refposition == "HELIOCENTER"
timesys = votable.get_timesys_by_id("no_origin")
assert timesys.timeorigin is None
assert timesys.timescale == "UTC"
assert timesys.refposition == "TOPOCENTER"
def test_timesys():
votable = parse(get_pkg_data_filename("data/timesys.xml"))
_timesys_tests(votable)
def test_timesys_roundtrip():
orig_votable = parse(get_pkg_data_filename("data/timesys.xml"))
bio = io.BytesIO()
orig_votable.to_xml(bio)
bio.seek(0)
votable = parse(bio)
_timesys_tests(votable)
def test_timesys_errors():
output = io.StringIO()
validate(get_pkg_data_filename("data/timesys_errors.xml"), output, xmllint=False)
outstr = output.getvalue()
assert "E23: Invalid timeorigin attribute 'bad-origin'" in outstr
assert "E22: ID attribute is required for all TIMESYS elements" in outstr
assert "W48: Unknown attribute 'refposition_mispelled' on TIMESYS" in outstr
|
75d5810622dea679def80c5caee240f102bc813b73bd59495220d5b9fb485b32 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.io.votable import ucd
def test_none():
assert ucd.check_ucd(None)
examples = {
"phys.temperature": [("ivoa", "phys.temperature")],
"pos.eq.ra;meta.main": [("ivoa", "pos.eq.ra"), ("ivoa", "meta.main")],
"meta.id;src": [("ivoa", "meta.id"), ("ivoa", "src")],
"phot.flux;em.radio;arith.ratio": [
("ivoa", "phot.flux"),
("ivoa", "em.radio"),
("ivoa", "arith.ratio"),
],
"PHot.Flux;EM.Radio;ivoa:arith.Ratio": [
("ivoa", "phot.flux"),
("ivoa", "em.radio"),
("ivoa", "arith.ratio"),
],
"pos.galactic.lat": [("ivoa", "pos.galactic.lat")],
"meta.code;phot.mag": [("ivoa", "meta.code"), ("ivoa", "phot.mag")],
"stat.error;phot.mag": [("ivoa", "stat.error"), ("ivoa", "phot.mag")],
"phys.temperature;instr;stat.max": [
("ivoa", "phys.temperature"),
("ivoa", "instr"),
("ivoa", "stat.max"),
],
"stat.error;phot.mag;em.opt.V": [
("ivoa", "stat.error"),
("ivoa", "phot.mag"),
("ivoa", "em.opt.V"),
],
"phot.color;em.opt.B;em.opt.V": [
("ivoa", "phot.color"),
("ivoa", "em.opt.B"),
("ivoa", "em.opt.V"),
],
"stat.error;phot.color;em.opt.B;em.opt.V": [
("ivoa", "stat.error"),
("ivoa", "phot.color"),
("ivoa", "em.opt.B"),
("ivoa", "em.opt.V"),
],
}
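# Each entry maps a UCD string to the (namespace, word) pairs expected from
# ucd.parse_ucd; unprefixed words default to the "ivoa" namespace, and
# mixed-case input is normalized to the controlled vocabulary's canonical
# form (compare the "PHot.Flux..." entry with its lower-case twin above).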
def test_check():
for s, p in examples.items():
assert ucd.parse_ucd(s, True, True) == p
assert ucd.check_ucd(s, True, True)
def test_too_many_colons():
with pytest.raises(ValueError):
ucd.parse_ucd("ivoa:stsci:phot", True, True)
def test_invalid_namespace():
with pytest.raises(ValueError):
ucd.parse_ucd("_ivoa:phot.mag", True, True)
def test_invalid_word():
with pytest.raises(ValueError):
ucd.parse_ucd("-pho")
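def _example_parse_ucd():
    # Illustrative sketch, not collected by pytest and not part of the
    # original suite: parse_ucd returns (namespace, word) pairs, as in the
    # ``examples`` mapping above.
    pairs = ucd.parse_ucd("pos.eq.ra;meta.main", True, True)
    assert pairs == [("ivoa", "pos.eq.ra"), ("ivoa", "meta.main")]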
|
6d7c705b86718b97f9a12e94e97c0a7abf32ae059c58caa93a173d818937a748 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import numpy as np
import pytest
from astropy.config import reload_config, set_temp_config
from astropy.io.votable import conf, from_table, is_votable, tree, validate
from astropy.io.votable.exceptions import E25, W39, VOWarning
from astropy.io.votable.table import parse, writeto
from astropy.table import Column, Table
from astropy.table.table_helpers import simple_table
from astropy.units import Unit
from astropy.utils.data import (
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
@pytest.fixture
def home_is_data(monkeypatch):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path("data")
# For Unix
monkeypatch.setenv("HOME", path)
# For Windows
monkeypatch.setenv("USERPROFILE", path)
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def test_table(tmp_path):
# Read the VOTABLE
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
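    # Having converted back to a VOTable, check that each column's datatype
    # and arraysize survived the astropy.table round trip.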
field_types = [
("string_test", {"datatype": "char", "arraysize": "*"}),
("string_test_2", {"datatype": "char", "arraysize": "10"}),
("unicode_test", {"datatype": "unicodeChar", "arraysize": "*"}),
("fixed_unicode_test", {"datatype": "unicodeChar", "arraysize": "10"}),
("string_array_test", {"datatype": "char", "arraysize": "4"}),
("unsignedByte", {"datatype": "unsignedByte"}),
("short", {"datatype": "short"}),
("int", {"datatype": "int"}),
("long", {"datatype": "long"}),
("double", {"datatype": "double"}),
("float", {"datatype": "float"}),
("array", {"datatype": "long", "arraysize": "2*"}),
("bit", {"datatype": "bit"}),
("bitarray", {"datatype": "bit", "arraysize": "3x2"}),
("bitvararray", {"datatype": "bit", "arraysize": "*"}),
("bitvararray2", {"datatype": "bit", "arraysize": "3x2*"}),
("floatComplex", {"datatype": "floatComplex"}),
("doubleComplex", {"datatype": "doubleComplex"}),
("doubleComplexArray", {"datatype": "doubleComplex", "arraysize": "*"}),
("doubleComplexArrayFixed", {"datatype": "doubleComplex", "arraysize": "2"}),
("boolean", {"datatype": "bit"}),
("booleanArray", {"datatype": "bit", "arraysize": "4"}),
("nulls", {"datatype": "int"}),
("nulls_array", {"datatype": "int", "arraysize": "2x2"}),
("precision1", {"datatype": "double"}),
("precision2", {"datatype": "double"}),
("doublearray", {"datatype": "double", "arraysize": "*"}),
("bitarray2", {"datatype": "bit", "arraysize": "16"}),
]
for field, type in zip(t.fields, field_types):
name, d = type
assert field.ID == name
assert (
field.datatype == d["datatype"]
        ), f'{name} expected {d["datatype"]} but got {field.datatype}'
if "arraysize" in d:
assert field.arraysize == d["arraysize"]
# W39: Bit values can not be masked
with pytest.warns(W39):
writeto(votable2, str(tmp_path / "through_table.xml"))
def test_read_from_tilde_path(home_is_data):
# Just test that these run without error for tilde-paths
path = os.path.join("~", "regression.xml")
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(path)
Table.read(path, format="votable", table_id="main_table")
def test_read_through_table_interface(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable", table_id="main_table")
assert len(t) == 5
# Issue 8354
assert t["float"].format is None
fn = tmp_path / "table_interface.xml"
# W39: Bit values can not be masked
with pytest.warns(W39):
t.write(fn, table_id="FOO", format="votable")
with open(fn, "rb") as fd:
t2 = Table.read(fd, format="votable", table_id="FOO")
assert len(t2) == 5
def test_read_through_table_interface2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable", table_id="last_table")
assert len(t) == 0
def test_pass_kwargs_through_table_interface():
# Table.read() should pass on keyword arguments meant for parse()
filename = get_pkg_data_filename("data/nonstandard_units.xml")
t = Table.read(filename, format="votable", unit_format="generic")
assert t["Flux1"].unit == Unit("erg / (Angstrom cm2 s)")
def test_names_over_ids():
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
"Name",
"GLON",
"GLAT",
"RAdeg",
"DEdeg",
"Jmag",
"Hmag",
"Kmag",
"G3.6mag",
"G4.5mag",
"G5.8mag",
"G8.0mag",
"4.5mag",
"8.0mag",
"Emag",
"24mag",
"f_Name",
]
def test_explicit_ids():
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
"col1",
"col2",
"col3",
"col4",
"col5",
"col6",
"col7",
"col8",
"col9",
"col10",
"col11",
"col12",
"col13",
"col14",
"col15",
"col16",
"col17",
]
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable")
assert len(t) == 1
def test_votable_path_object():
"""
    Test that a VOTable can be parsed when passed as a pathlib.Path object (#4412).
"""
fpath = pathlib.Path(get_pkg_data_filename("data/names.xml"))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
t = Table()
c = Column(data=[1, 2, 3], name="a")
t.add_column(c)
output = io.BytesIO()
t.write(output, format="votable")
def test_write_with_format():
t = Table()
c = Column(data=[1, 2, 3], name="a")
t.add_column(c)
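    # tabledata_format selects the serialization of the DATA element:
    # "tabledata" (plain XML rows, the default), "binary", or "binary2"
    # (VOTable 1.3 and later).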
output = io.BytesIO()
t.write(output, format="votable", tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b"BINARY" in obuff
assert b"TABLEDATA" not in obuff
output = io.BytesIO()
t.write(output, format="votable", tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b"BINARY2" in obuff
assert b"TABLEDATA" not in obuff
def test_write_overwrite(tmp_path):
t = simple_table(3, 3)
filename = tmp_path / "overwrite_test.vot"
t.write(filename, format="votable")
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format="votable")
t.write(filename, format="votable", overwrite=True)
def test_write_tilde_path(home_is_tmpdir):
fname = os.path.join("~", "output")
t = Table()
t["a"] = [1, 2, 3]
t.write(fname, format="votable", tabledata_format="binary")
# Ensure the tilde-prefixed path wasn't treated literally
assert not os.path.exists(fname)
with open(os.path.expanduser(fname)) as f:
obuff = f.read()
assert 'VOTABLE version="1.4"' in obuff
assert "BINARY" in obuff
assert "TABLEDATA" not in obuff
@pytest.mark.parametrize("path_format", ["plain", "tilde"])
def test_writeto(path_format, tmp_path, home_is_tmpdir):
if path_format == "plain":
# pathlib.Path objects are not accepted by votable.writeto, so convert
# to a string
fname = str(tmp_path / "writeto_test.vot")
else:
fname = os.path.join("~", "writeto_test.vot")
t = Table()
t["a"] = [1, 2, 3]
vt = from_table(t)
writeto(vt, fname)
if path_format == "tilde":
# Ensure the tilde-prefixed path wasn't treated literally
assert not os.path.exists(fname)
with open(os.path.expanduser(fname)) as f:
obuff = f.read()
assert 'VOTABLE version="1.4"' in obuff
assert "BINARY" not in obuff
assert "TABLEDATA" in obuff
def test_empty_table():
votable = parse(get_pkg_data_filename("data/empty_table.xml"))
table = votable.get_first_table()
table.to_table()
def test_no_field_not_empty_table():
votable = parse(get_pkg_data_filename("data/no_field_not_empty_table.xml"))
table = votable.get_first_table()
assert len(table.fields) == 0
assert len(table.infos) == 1
def test_no_field_not_empty_table_exception():
with pytest.raises(E25):
parse(
get_pkg_data_filename("data/no_field_not_empty_table.xml"),
verify="exception",
)
def test_binary2_masked_strings():
"""
Issue #8995
"""
# Read a VOTable which sets the null mask bit for each empty string value.
votable = parse(get_pkg_data_filename("data/binary2_masked_strings.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
# Ensure string columns have no masked values and can be written out
assert not np.any(table.array.mask["epoch_photometry_url"])
output = io.BytesIO()
astropy_table.write(output, format="votable")
def test_validate_output_invalid():
"""
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable.
"""
# A votable with errors
invalid_votable_filepath = get_pkg_data_filename("data/regression.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(invalid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known error string
assert "E02: Incorrect number of elements in array." in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(invalid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is not valid)
assert validate_out is False
def test_validate_output_valid():
"""
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable
"""
# A valid votable. (Example from the votable standard:
# https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html )
valid_votable_filepath = get_pkg_data_filename("data/valid_votable.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(valid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known good output string
assert "astropy.io.votable found no violations" in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(valid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is valid)
assert validate_out is True
def test_validate_tilde_path(home_is_data):
validate(os.path.join("~", "valid_votable.xml"))
def test_is_votable_tilde_path(home_is_data):
assert is_votable(os.path.join("~", "valid_votable.xml"))
class TestVerifyOptions:
# Start off by checking the default (ignore)
def test_default(self):
parse(get_pkg_data_filename("data/gemini.xml"))
# Then try the various explicit options
def test_verify_ignore(self):
parse(get_pkg_data_filename("data/gemini.xml"), verify="ignore")
def test_verify_warn(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"), verify="warn")
assert len(w) == 24
def test_verify_exception(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"), verify="exception")
# Make sure the deprecated pedantic option still works for now
def test_pedantic_false(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"), pedantic=False)
assert len(w) == 25
def test_pedantic_true(self):
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"), pedantic=True)
# Make sure that the default behavior can be set via configuration items
def test_conf_verify_ignore(self):
with conf.set_temp("verify", "ignore"):
parse(get_pkg_data_filename("data/gemini.xml"))
def test_conf_verify_warn(self):
with conf.set_temp("verify", "warn"):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"))
assert len(w) == 24
def test_conf_verify_exception(self):
with conf.set_temp("verify", "exception"):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"))
# And make sure the old configuration item will keep working
def test_conf_pedantic_false(self, tmp_path):
with set_temp_config(tmp_path):
with open(tmp_path / "astropy" / "astropy.cfg", "w") as f:
f.write("[io.votable]\npedantic = False")
reload_config("astropy.io.votable")
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"))
assert len(w) == 25
def test_conf_pedantic_true(self, tmp_path):
with set_temp_config(tmp_path):
with open(tmp_path / "astropy" / "astropy.cfg", "w") as f:
f.write("[io.votable]\npedantic = True")
reload_config("astropy.io.votable")
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"))
|
63c142487419a1bd1e05e2c7c95970077072b19b6036b1690dbc215197d7e35d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.io.votable import tree
from astropy.io.votable.exceptions import W07, W08, W21, W41
from astropy.io.votable.table import parse
from astropy.io.votable.tree import Resource, VOTableFile
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_check_astroyear_fail():
config = {"verify": "exception"}
field = tree.Field(None, name="astroyear", arraysize="1")
with pytest.raises(W07):
tree.check_astroyear("X2100", field, config)
def test_string_fail():
config = {"verify": "exception"}
with pytest.raises(W08):
tree.check_string(42, "foo", config)
def test_make_Fields():
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
table.fields.extend(
[tree.Field(votable, name="Test", datatype="float", unit="mag")]
)
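# VOTable versions prior to 1.4 default to the CDS unit syntax; 1.4 switched
# the default to VOUnits, which is what the per-version checks below verify.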
def test_unit_format():
data = parse(get_pkg_data_filename("data/irsa-nph-error.xml"))
assert data._config["version"] == "1.0"
assert tree._get_default_unit_format(data._config) == "cds"
data = parse(get_pkg_data_filename("data/names.xml"))
assert data._config["version"] == "1.1"
assert tree._get_default_unit_format(data._config) == "cds"
data = parse(get_pkg_data_filename("data/gemini.xml"))
assert data._config["version"] == "1.2"
assert tree._get_default_unit_format(data._config) == "cds"
data = parse(get_pkg_data_filename("data/binary2_masked_strings.xml"))
assert data._config["version"] == "1.3"
assert tree._get_default_unit_format(data._config) == "cds"
data = parse(get_pkg_data_filename("data/timesys.xml"))
assert data._config["version"] == "1.4"
assert tree._get_default_unit_format(data._config) == "vounit"
def test_namespace_warning():
"""
A version 1.4 VOTable must use the same namespace as 1.3.
(see https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC16)
"""
bad_namespace = b"""<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.4"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
"""
with pytest.warns(W41):
parse(io.BytesIO(bad_namespace), verify="exception")
good_namespace_14 = b"""<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
"""
parse(io.BytesIO(good_namespace_14), verify="exception")
good_namespace_13 = b"""<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.3" xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
"""
parse(io.BytesIO(good_namespace_13), verify="exception")
def test_version():
"""
VOTableFile.__init__ allows versions of '1.0', '1.1', '1.2', '1.3' and '1.4'.
The '1.0' is curious since other checks in parse() and the version setter do not allow '1.0'.
This test confirms that behavior for now. A future change may remove the '1.0'.
"""
# Exercise the checks in __init__
with pytest.warns(AstropyDeprecationWarning):
VOTableFile(version="1.0")
for version in ("1.1", "1.2", "1.3", "1.4"):
VOTableFile(version=version)
for version in ("0.9", "2.0"):
with pytest.raises(
ValueError, match=r"should be in \('1.0', '1.1', '1.2', '1.3', '1.4'\)."
):
VOTableFile(version=version)
# Exercise the checks in the setter
vot = VOTableFile()
for version in ("1.1", "1.2", "1.3", "1.4"):
vot.version = version
for version in ("1.0", "2.0"):
with pytest.raises(
ValueError, match=r"supports VOTable versions '1.1', '1.2', '1.3', '1.4'$"
):
vot.version = version
# Exercise the checks in the parser.
begin = b'<?xml version="1.0" encoding="utf-8"?><VOTABLE version="'
middle = b'" xmlns="http://www.ivoa.net/xml/VOTable/v'
end = (
b'" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><RESOURCE/></VOTABLE>'
)
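    # Stitched together, e.g. with bversion = b"1.1", this yields a minimal
    # document of the form:
    #   <?xml version="1.0" encoding="utf-8"?><VOTABLE version="1.1"
    #    xmlns="http://www.ivoa.net/xml/VOTable/v1.1"
    #    xmlns:xsi="..."><RESOURCE/></VOTABLE>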
# Valid versions
for bversion in (b"1.1", b"1.2", b"1.3"):
parse(
io.BytesIO(begin + bversion + middle + bversion + end), verify="exception"
)
parse(io.BytesIO(begin + b"1.4" + middle + b"1.3" + end), verify="exception")
# Invalid versions
for bversion in (b"1.0", b"2.0"):
with pytest.warns(W21):
parse(
io.BytesIO(begin + bversion + middle + bversion + end),
verify="exception",
)
def votable_xml_string(version):
votable_file = VOTableFile(version=version)
votable_file.resources.append(Resource())
xml_bytes = io.BytesIO()
votable_file.to_xml(xml_bytes)
xml_bytes.seek(0)
bstring = xml_bytes.read()
s = bstring.decode("utf-8")
return s
def test_votable_tag():
xml = votable_xml_string("1.1")
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
xml = votable_xml_string("1.2")
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
xml = votable_xml_string("1.3")
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
    assert (
        'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
        'http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"' in xml
    )
xml = votable_xml_string("1.4")
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
    assert (
        'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
        'http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"' in xml
    )
|
99bda357d9b7afff1ed44ef041c4b002fd3d6554cc2f71dc2922b728cd92fc3d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
# THIRD-PARTY
import numpy as np
import pytest
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable import converters, exceptions, tree
from astropy.io.votable.table import parse_single_table
from astropy.utils.data import get_pkg_data_filename
def test_invalid_arraysize():
with pytest.raises(exceptions.E13):
field = tree.Field(None, name="broken", datatype="char", arraysize="foo")
converters.get_converter(field)
def test_oversize_char():
config = {"verify": "exception"}
with pytest.warns(exceptions.W47) as w:
field = tree.Field(None, name="c", datatype="char", config=config)
c = converters.get_converter(field, config=config)
assert len(w) == 1
with pytest.warns(exceptions.W46) as w:
c.parse("XXX")
assert len(w) == 1
def test_char_mask():
config = {"verify": "exception"}
field = tree.Field(None, name="c", arraysize="1", datatype="char", config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ""
def test_oversize_unicode():
config = {"verify": "exception"}
with pytest.warns(exceptions.W46) as w:
field = tree.Field(
None, name="c2", datatype="unicodeChar", arraysize="1", config=config
)
c = converters.get_converter(field, config=config)
c.parse("XXX")
assert len(w) == 1
def test_unicode_mask():
config = {"verify": "exception"}
field = tree.Field(
None, name="c", arraysize="1", datatype="unicodeChar", config=config
)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ""
def test_unicode_as_char():
config = {"verify": "exception"}
field = tree.Field(
None, name="unicode_in_char", datatype="char", arraysize="*", config=config
)
c = converters.get_converter(field, config=config)
# Test parsing.
c.parse("XYZ") # ASCII succeeds
with pytest.warns(
exceptions.W55,
match=(
r'FIELD \(unicode_in_char\) has datatype="char" but contains non-ASCII'
r" value"
),
):
c.parse("zła") # non-ASCII
# Test output.
c.output("XYZ", False) # ASCII str succeeds
c.output(b"XYZ", False) # ASCII bytes succeeds
value = "zła"
value_bytes = value.encode("utf-8")
    with pytest.warns(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"):
        c.output(value, False)  # non-ASCII str warns
    with pytest.warns(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"):
        c.output(value_bytes, False)  # non-ASCII bytes warns
def test_unicode_as_char_binary():
config = {"verify": "exception"}
field = tree.Field(
None, name="unicode_in_char", datatype="char", arraysize="*", config=config
)
c = converters.get_converter(field, config=config)
c._binoutput_var("abc", False) # ASCII succeeds
with pytest.raises(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"):
c._binoutput_var("zła", False)
field = tree.Field(
None, name="unicode_in_char", datatype="char", arraysize="3", config=config
)
c = converters.get_converter(field, config=config)
c._binoutput_fixed("xyz", False)
with pytest.raises(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"):
c._binoutput_fixed("zła", False)
def test_wrong_number_of_elements():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="int", arraysize="2x3*", config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E02):
c.parse("2 3 4 5 6")
def test_float_mask():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="float", config=config)
c = converters.get_converter(field, config=config)
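    # An empty TABLEDATA cell parses to the converter's null value with the
    # mask flag set; any other unparseable value is an error.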
assert c.parse("") == (c.null, True)
with pytest.raises(ValueError):
c.parse("null")
def test_float_mask_permissive():
config = {"verify": "ignore"}
field = tree.Field(None, name="c", datatype="float", config=config)
    # config needs to be passed into parse() as well for this to work.
# https://github.com/astropy/astropy/issues/8775
c = converters.get_converter(field, config=config)
assert c.parse("null", config=config) == (c.null, True)
def test_double_array():
config = {"verify": "exception", "version_1_3_or_later": True}
field = tree.Field(None, name="c", datatype="double", arraysize="3", config=config)
data = (1.0, 2.0, 3.0)
c = converters.get_converter(field, config=config)
assert c.output(1.0, False) == "1"
assert c.output(1.0, [False, False]) == "1"
assert c.output(data, False) == "1 2 3"
assert c.output(data, [False, False, False]) == "1 2 3"
assert c.output(data, [False, False, True]) == "1 2 NaN"
assert c.output(data, [False, False]) == "1 2"
a = c.parse("1 2 3", config=config)
assert_array_equal(a[0], data)
assert_array_equal(a[1], False)
with pytest.raises(exceptions.E02):
c.parse("1", config=config)
with pytest.raises(AttributeError), pytest.warns(exceptions.E02):
c.parse("1")
with pytest.raises(exceptions.E02):
c.parse("2 3 4 5 6", config=config)
with pytest.warns(exceptions.E02):
a = c.parse("2 3 4 5 6")
assert_array_equal(a[0], [2, 3, 4])
assert_array_equal(a[1], False)
def test_complex_array_vararray():
config = {"verify": "exception"}
field = tree.Field(
None, name="c", datatype="floatComplex", arraysize="2x3*", config=config
)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E02):
c.parse("2 3 4 5 6")
def test_complex_array_vararray2():
config = {"verify": "exception"}
field = tree.Field(
None, name="c", datatype="floatComplex", arraysize="2x3*", config=config
)
c = converters.get_converter(field, config=config)
x = c.parse("")
assert len(x[0]) == 0
def test_complex_array_vararray3():
config = {"verify": "exception"}
field = tree.Field(
None, name="c", datatype="doubleComplex", arraysize="2x3*", config=config
)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12")
assert len(x) == 2
assert np.all(x[0][0][0] == complex(1, 2))
def test_complex_vararray():
config = {"verify": "exception"}
field = tree.Field(
None, name="c", datatype="doubleComplex", arraysize="*", config=config
)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4")
assert len(x) == 2
assert x[0][0] == complex(1, 2)
def test_complex():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="doubleComplex", config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E03):
c.parse("1 2 3")
def test_bit():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="bit", config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E04):
c.parse("T")
def test_bit_mask():
config = {"verify": "exception"}
with pytest.warns(exceptions.W39) as w:
field = tree.Field(None, name="c", datatype="bit", config=config)
c = converters.get_converter(field, config=config)
c.output(True, True)
assert len(w) == 1
def test_boolean():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="boolean", config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E05):
c.parse("YES")
def test_boolean_array():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="boolean", arraysize="*", config=config)
c = converters.get_converter(field, config=config)
r, mask = c.parse("TRUE FALSE T F 0 1")
assert_array_equal(r, [True, False, True, False, False, True])
def test_invalid_type():
config = {"verify": "exception"}
with pytest.raises(exceptions.E06):
field = tree.Field(None, name="c", datatype="foobar", config=config)
converters.get_converter(field, config=config)
def test_precision():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="float", precision="E4", config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == "266.2"
field = tree.Field(None, name="c", datatype="float", precision="F4", config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == "266.2480"
def test_integer_overflow():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="int", config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.W51):
c.parse("-2208988800", config=config)
def test_float_default_precision():
config = {"verify": "exception"}
field = tree.Field(None, name="c", datatype="float", arraysize="4", config=config)
c = converters.get_converter(field, config=config)
assert (
c.output([1, 2, 3, 8.9990234375], [False, False, False, False])
== "1 2 3 8.9990234375"
)
def test_vararray():
votable = tree.VOTableFile()
resource = tree.Resource()
votable.resources.append(resource)
table = tree.Table(votable)
resource.tables.append(table)
tabarr = []
heads = ["headA", "headB", "headC"]
types = ["char", "double", "int"]
vals = [["A", 1.0, 2], ["B", 2.0, 3], ["C", 3.0, 4]]
for i in range(len(heads)):
tabarr.append(
tree.Field(votable, name=heads[i], datatype=types[i], arraysize="*")
)
table.fields.extend(tabarr)
table.create_arrays(len(vals))
for i in range(len(vals)):
values = tuple(vals[i])
table.array[i] = values
buff = io.BytesIO()
votable.to_xml(buff)
def test_gemini_v1_2():
"""
see Pull Request 4782 or Issue 4781 for details
"""
table = parse_single_table(get_pkg_data_filename("data/gemini.xml"))
assert table is not None
tt = table.to_table()
assert (
tt["access_url"][0]
== "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/GEMINI/"
"S20120515S0064?runid=bx9b1o8cvk1qesrt"
)
|
3bc2c907d7d1927ea8d8c0f0976ee6869218f576f79443f55453001206cf4395 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
import hashlib
import http.client
import os
import pickle
import shutil
import socket
import subprocess
import urllib.error
import urllib.request
import warnings
from xml.parsers.expat import ExpatError
# VO
from astropy.io.votable import exceptions, table, xmlutil
class Result:
def __init__(self, url, root="results", timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(self._hash[0:2], self._hash[2:4], self._hash[4:])
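        # Results are sharded into nested directories keyed by the URL's MD5
        # digest (e.g. ab/cd/ef0123...), keeping any single directory small.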
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, "rb") as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, "wb") as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if "network_error" not in self._attributes:
self["network_error"] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, "wb") as fd:
fd.write(f"FAILED: {reason}\n".encode())
self["network_error"] = reason
r = None
try:
r = urllib.request.urlopen(self.url.decode("ascii"), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, "reason"):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail(f"HTTPException: {str(e)}")
return
        except (socket.timeout, OSError):
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
        except socket.timeout:
fail("Timeout")
return
else:
r.close()
with open(path, "wb") as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, "rb") as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self["version"] = ""
if "network_error" in self and self["network_error"] is not None:
self["nwarnings"] = 0
self["nexceptions"] = 0
self["warnings"] = []
self["xmllint"] = None
self["warning_types"] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, "rb") as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, verify="warn", filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self["version"] = version = t.version
else:
self["version"] = version = "1.0"
if "xmllint" not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
# OSError is raised when XML file eats all memory and
# system sends kill signal.
except OSError as e:
self["xmllint"] = None
self["xmllint_content"] = str(e)
else:
self["xmllint"] = success == 0
self["xmllint_content"] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w["is_warning"]:
nwarnings += 1
if w["is_exception"]:
nexceptions += 1
warning_types.add(w["warning"])
self["nwarnings"] = nwarnings
self["nexceptions"] = nexceptions
self["warnings"] = lines
self["warning_types"] = warning_types
def has_warning(self, warning_code):
return warning_code in self["warning_types"]
def match_expectations(self):
if "network_error" not in self:
self["network_error"] = None
if self["expected"] == "good":
return (
not self["network_error"]
and self["nwarnings"] == 0
and self["nexceptions"] == 0
)
elif self["expected"] == "incorrect":
return not self["network_error"] and (
self["nwarnings"] > 0 or self["nexceptions"] > 0
)
elif self["expected"] == "broken":
return self["network_error"] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
["java", "-jar", path_to_stilts_jar, "votlint", "validate=false", filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self["votlint"] = False
else:
self["votlint"] = True
self["votlint_content"] = stdout
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if x["nwarnings"] == 0 and x["nexceptions"] == 0 and x["xmllint"] is True:
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x["xmllint"] is False:
fail_schema.append(x)
if x["xmllint"] is False and x["nwarnings"] == 0 and x["nexceptions"] == 0:
schema_mismatch.append(x)
if "votlint" in x and x["votlint"] is False:
fail_votlint.append(x)
if "network_error" not in x:
x["network_error"] = None
if (
x["nwarnings"] == 0
and x["nexceptions"] == 0
and x["network_error"] is None
):
votlint_mismatch.append(x)
if "network_error" in x and x["network_error"] is not None:
network_failures.append(x)
version = x["version"]
if version == "1.0":
version_10.append(x)
elif version == "1.1":
version_11.append(x)
elif version == "1.2":
version_12.append(x)
else:
version_unknown.append(x)
if x["nwarnings"] > 0:
has_warnings.append(x)
for warning in x["warning_types"]:
if (
warning is not None
and len(warning) == 3
and warning.startswith("W")
):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x["nexceptions"] > 0:
has_exceptions.append(x)
for exc in x["warning_types"]:
if exc is not None and len(exc) == 3 and exc.startswith("E"):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
("all", "All tests", all_results),
("correct", "Correct", correct),
("unexpected", "Unexpected", not_expected),
("schema", "Invalid against schema", fail_schema),
(
"schema_mismatch",
"Invalid against schema/Passed vo.table",
schema_mismatch,
["ul"],
),
("fail_votlint", "Failed votlint", fail_votlint),
(
"votlint_mismatch",
"Failed votlint/Passed vo.table",
votlint_mismatch,
["ul"],
),
("network_failures", "Network failures", network_failures),
("version1.0", "Version 1.0", version_10),
("version1.1", "Version 1.1", version_11),
("version1.2", "Version 1.2", version_12),
("version_unknown", "Version unknown", version_unknown),
("warnings", "Warnings", has_warnings),
]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(
warning_code,
f"{warning_code}: {warning_descr}",
warning,
["ul", "li"],
)
)
tables.append(("exceptions", "Exceptions", has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(
exception_code,
f"{exception_code}: {exception_descr}",
exc,
["ul", "li"],
)
)
return tables
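# A minimal usage sketch (illustrative URL; not part of the module). ``Result``
# is designed to be used as a context manager so that its attributes are
# persisted to ``values.dat`` on exit; note that URLs are handled as bytes.
#
# with Result(b"http://example.com/vo/table.xml", root="results") as r:
#     r["expected"] = "good"
#     r.download_xml_content()
#     r.validate_vo()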
|
167b29d73de63071a40fea0b3c4ef8dfae150d0f8d04e8f49351775611aacb58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Validates a large collection of web-accessible VOTable files,
and generates a report as a directory tree of HTML files.
"""
# STDLIB
import os
# LOCAL
from astropy.utils.data import get_pkg_data_filename
from . import html, result
__all__ = ["make_validation_report"]
def get_srcdir():
return os.path.dirname(__file__)
def get_urls(destdir, s):
import gzip
types = ["good", "broken", "incorrect"]
seen = set()
urls = []
for type in types:
filename = get_pkg_data_filename(f"data/urls/cone.{type}.dat.gz")
with gzip.open(filename, "rb") as fd:
for url in fd.readlines():
next(s)
url = url.strip()
if url not in seen:
with result.Result(url, root=destdir) as r:
r["expected"] = type
urls.append(url)
seen.add(url)
return urls
def download(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.download_xml_content()
def validate_vo(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.validate_vo()
def votlint_validate(args):
path_to_stilts_jar, url, destdir = args
with result.Result(url, root=destdir) as r:
if r["network_error"] is None:
r.validate_with_votlint(path_to_stilts_jar)
def write_html_result(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
html.write_result(r)
def write_subindex(args):
subset, destdir, total = args
html.write_index_table(destdir, *subset, total=total)
def make_validation_report(
urls=None,
destdir="astropy.io.votable.validator.results",
multiprocess=True,
stilts=None,
):
"""
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of str, optional
If provided, is a list of HTTP urls to download VOTable files
from. If not provided, a built-in set of ~22,000 urls
compiled by HEASARC will be used.
destdir : path-like, optional
The directory to write the report to. By default, this is a
directory called ``'results'`` in the current directory. If the
directory does not exist, it will be created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path-like, optional
        To perform validation with ``votlint`` from the Java-based
`STILTS <http://www.star.bris.ac.uk/~mbt/stilts/>`_ VOTable
parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first.
"""
from astropy.utils.console import ProgressBar, Spinner, color_print
if stilts is not None:
if not os.path.exists(stilts):
raise ValueError(f"{stilts} does not exist.")
destdir = os.path.expanduser(destdir)
destdir = os.path.abspath(destdir)
if urls is None:
with Spinner("Loading URLs", "green") as s:
urls = get_urls(destdir, s)
else:
        urls = [url.encode() if isinstance(url, str) else url for url in urls]
color_print("Marking URLs", "green")
for url in ProgressBar(urls):
with result.Result(url, root=destdir) as r:
r["expected"] = type
args = [(url, destdir) for url in urls]
color_print("Downloading VO files", "green")
ProgressBar.map(download, args, multiprocess=multiprocess)
color_print("Validating VO files", "green")
ProgressBar.map(validate_vo, args, multiprocess=multiprocess)
if stilts is not None:
color_print("Validating with votlint", "green")
votlint_args = [(stilts, x, destdir) for x in urls]
ProgressBar.map(votlint_validate, votlint_args, multiprocess=multiprocess)
color_print("Generating HTML files", "green")
ProgressBar.map(write_html_result, args, multiprocess=multiprocess)
with Spinner("Grouping results", "green") as s:
subsets = result.get_result_subsets(urls, destdir, s)
color_print("Generating index", "green")
html.write_index(subsets, urls, destdir)
color_print("Generating subindices", "green")
subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
ProgressBar.map(write_subindex, subindex_args, multiprocess=multiprocess)
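# A minimal usage sketch (hypothetical URL and paths; not part of the module):
#
# make_validation_report(
#     urls=["http://example.com/vo/cone.xml"],
#     destdir="validation-results",
#     multiprocess=False,
#     stilts="/opt/stilts/stilts.jar",  # optional; enables votlint checks
# )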
|
b76cc9e6005c736c0029e5617032da38ac9c915fae56886b525dc9479484b9fc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from . import main
from .main import make_validation_report
__doc__ = main.__doc__
del main
|
346519a3b878733dde72adb8ddc26259517b31c12cc11eac99b6626e62b36b36 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import contextlib
import os
import re
from math import ceil
from astropy import online_docs_root
from astropy.io.votable import exceptions
from astropy.utils.xml.writer import XMLWriter, xml_escape
html_header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML Basic 1.0//EN"
"http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd">
"""
default_style = """
body {
font-family: sans-serif
}
a {
text-decoration: none
}
.highlight {
color: red;
font-weight: bold;
text-decoration: underline;
}
.green { background-color: #ddffdd }
.red { background-color: #ffdddd }
.yellow { background-color: #ffffdd }
tr:hover { background-color: #dddddd }
table {
border-width: 1px;
border-spacing: 0px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
background-color: white;
padding: 5px;
}
table th {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
table td {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
"""
@contextlib.contextmanager
def make_html_header(w):
w.write(html_header)
with w.tag("html", xmlns="http://www.w3.org/1999/xhtml", lang="en-US"):
with w.tag("head"):
w.element("title", "VO Validation results")
w.element("style", default_style)
with w.tag("body"):
yield
def write_source_line(w, line, nchar=0):
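    # Split the line around the offending character so that it can be wrapped
    # in a highlight span: text before, the character itself, and text after.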
part1 = xml_escape(line[:nchar].decode("utf-8"))
char = xml_escape(line[nchar : nchar + 1].decode("utf-8"))
part2 = xml_escape(line[nchar + 1 :].decode("utf-8"))
w.write(" ")
w.write(part1)
w.write(f'<span class="highlight">{char}</span>')
w.write(part2)
w.write("\n\n")
def write_warning(w, line, xml_lines):
warning = exceptions.parse_vowarning(line)
if not warning["is_something"]:
w.data(line)
else:
w.write(f"Line {warning['nline']:d}: ")
if warning["warning"]:
w.write(
'<a href="{}/{}">{}</a>: '.format(
online_docs_root, warning["doc_url"], warning["warning"]
)
)
msg = warning["message"]
if not isinstance(warning["message"], str):
msg = msg.decode("utf-8")
w.write(xml_escape(msg))
w.write("\n")
        if 1 <= warning["nline"] <= len(xml_lines):
write_source_line(w, xml_lines[warning["nline"] - 1], warning["nchar"])
def write_votlint_warning(w, line, xml_lines):
match = re.search(
r"(WARNING|ERROR|INFO) \(l.(?P<line>[0-9]+), c.(?P<column>[0-9]+)\):"
r" (?P<rest>.*)",
line,
)
if match:
w.write(
"Line {:d}: {}\n".format(
int(match.group("line")), xml_escape(match.group("rest"))
)
)
write_source_line(
w, xml_lines[int(match.group("line")) - 1], int(match.group("column")) - 1
)
else:
w.data(line)
w.data("\n")
def write_result(result):
if "network_error" in result and result["network_error"] is not None:
return
xml = result.get_xml_content()
xml_lines = xml.splitlines()
path = os.path.join(result.get_dirpath(), "index.html")
with open(path, "w", encoding="utf-8") as fd:
w = XMLWriter(fd)
with make_html_header(w):
with w.tag("p"):
with w.tag("a", href="vo.xml"):
w.data(result.url.decode("ascii"))
w.element("hr")
with w.tag("pre"):
w._flush()
for line in result["warnings"]:
write_warning(w, line, xml_lines)
if result["xmllint"] is False:
w.element("hr")
w.element("p", "xmllint results:")
content = result["xmllint_content"]
if not isinstance(content, str):
content = content.decode("ascii")
content = content.replace(result.get_dirpath() + "/", "")
with w.tag("pre"):
w.data(content)
if "votlint" in result:
if result["votlint"] is False:
w.element("hr")
w.element("p", "votlint results:")
content = result["votlint_content"]
if not isinstance(content, str):
content = content.decode("ascii")
with w.tag("pre"):
w._flush()
for line in content.splitlines():
write_votlint_warning(w, line, xml_lines)
def write_result_row(w, result):
with w.tag("tr"):
with w.tag("td"):
if "network_error" in result and result["network_error"] is not None:
w.data(result.url.decode("ascii"))
else:
w.element(
"a",
result.url.decode("ascii"),
href=f"{result.get_htmlpath()}/index.html",
)
if "network_error" in result and result["network_error"] is not None:
w.element("td", str(result["network_error"]), attrib={"class": "red"})
w.element("td", "-")
w.element("td", "-")
w.element("td", "-")
w.element("td", "-")
else:
w.element("td", "-", attrib={"class": "green"})
if result["nexceptions"]:
cls = "red"
msg = "Fatal"
elif result["nwarnings"]:
cls = "yellow"
msg = str(result["nwarnings"])
else:
cls = "green"
msg = "-"
w.element("td", msg, attrib={"class": cls})
msg = result["version"]
if result["xmllint"] is None:
cls = ""
elif result["xmllint"] is False:
cls = "red"
else:
cls = "green"
w.element("td", msg, attrib={"class": cls})
if result["expected"] == "good":
cls = "green"
msg = "-"
elif result["expected"] == "broken":
cls = "red"
msg = "net"
elif result["expected"] == "incorrect":
cls = "yellow"
msg = "invalid"
w.element("td", msg, attrib={"class": cls})
if "votlint" in result:
if result["votlint"]:
cls = "green"
msg = "Passed"
else:
cls = "red"
msg = "Failed"
else:
cls = ""
msg = "?"
w.element("td", msg, attrib={"class": cls})
def write_table(basename, name, results, root="results", chunk_size=500):
def write_page_links(j):
if npages <= 1:
return
with w.tag("center"):
if j > 0:
w.element("a", "<< ", href=f"{basename}_{j - 1:02d}.html")
for i in range(npages):
if i == j:
w.data(str(i + 1))
else:
w.element("a", str(i + 1), href=f"{basename}_{i:02d}.html")
w.data(" ")
if j < npages - 1:
w.element("a", ">>", href=f"{basename}_{j + 1:02d}.html")
npages = int(ceil(float(len(results)) / chunk_size))
for i, j in enumerate(range(0, max(len(results), 1), chunk_size)):
subresults = results[j : j + chunk_size]
path = os.path.join(root, f"{basename}_{i:02d}.html")
with open(path, "w", encoding="utf-8") as fd:
w = XMLWriter(fd)
with make_html_header(w):
write_page_links(i)
w.element("h2", name)
with w.tag("table"):
with w.tag("tr"):
w.element("th", "URL")
w.element("th", "Network")
w.element("th", "Warnings")
w.element("th", "Schema")
w.element("th", "Expected")
w.element("th", "votlint")
for result in subresults:
write_result_row(w, result)
write_page_links(i)
def add_subset(w, basename, name, subresults, inside=["p"], total=None):
with w.tag("tr"):
subresults = list(subresults)
if total is None:
total = len(subresults)
if total == 0: # pragma: no cover
percentage = 0.0
else:
percentage = float(len(subresults)) / total
with w.tag("td"):
for element in inside:
w.start(element)
w.element("a", name, href=f"{basename}_00.html")
for element in reversed(inside):
w.end(element)
numbers = f"{len(subresults):d} ({percentage:.2%})"
with w.tag("td"):
w.data(numbers)
def write_index(subsets, results, root="results"):
path = os.path.join(root, "index.html")
with open(path, "w", encoding="utf-8") as fd:
w = XMLWriter(fd)
with make_html_header(w):
w.element("h1", "VO Validation results")
with w.tag("table"):
for subset in subsets:
add_subset(w, *subset, total=len(results))
def write_index_table(
root, basename, name, subresults, inside=None, total=None, chunk_size=500
):
    if total is None:
        total = len(subresults)
    if total == 0:  # pragma: no cover
        percentage = 0.0
    else:
        percentage = float(len(subresults)) / total
numbers = f"{len(subresults):d} ({percentage:.2%})"
write_table(basename, name + " " + numbers, subresults, root, chunk_size)
|
3c064907ccf1ba7b51d2bfd5fabbf1e4c8d13509c5ff0023a9596206d270fdb0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import wraps
import pytest
from astropy.utils.compat.optional_deps import HAS_PYTEST_MPL
def figure_test(*args, **kwargs):
"""
A decorator that defines a figure test.
This automatically decorates tests with mpl_image_compare with common
options used by all figure tests in astropy, and also adds the decorator
to allow remote data to be accessed.
"""
# NOTE: the savefig_kwargs option below is to avoid using PNG files with
# the matplotlib version embedded since this changes for every developer
# version.
tolerance = kwargs.pop("tolerance", 0)
style = kwargs.pop("style", {})
savefig_kwargs = kwargs.pop("savefig_kwargs", {})
savefig_kwargs["metadata"] = {"Software": None}
def decorator(test_function):
@pytest.mark.remote_data
@pytest.mark.mpl_image_compare(
tolerance=tolerance, style=style, savefig_kwargs=savefig_kwargs, **kwargs
)
@pytest.mark.skipif(
not HAS_PYTEST_MPL, reason="pytest-mpl is required for the figure tests"
)
@wraps(test_function)
def test_wrapper(*args, **kwargs):
return test_function(*args, **kwargs)
return test_wrapper
# If the decorator was used without any arguments, the only positional
# argument will be the test to decorate so we do the following:
if len(args) == 1:
return decorator(*args)
return decorator
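# A minimal usage sketch (illustrative test name and tolerance; assumes
# matplotlib and pytest-mpl are installed). The decorated test must return
# the figure to be compared against the reference image:
#
# @figure_test(tolerance=2)
# def test_simple_plot():
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3])
#     return fig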
|
9e28bc34148da05fd99f2ba7099370f809642edf73b6344a86664429fccd6154 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .helpers import figure_test
|
8dfaafdd079b2bed05817d16f9a4a43c45f250c7f96db3b14c5e94039a153db1 | import pytest
# These imports are renamed so that having them in the namespace will not
# cause pytest 3 to discover them as tests and then complain that they have
# __init__ defined.
from astropy.tests.runner import TestRunner as _TestRunner
from astropy.tests.runner import TestRunnerBase as _TestRunnerBase
from astropy.tests.runner import keyword
def test_disable_kwarg():
class no_remote_data(_TestRunner):
@keyword()
def remote_data(self, remote_data, kwargs):
return NotImplemented
r = no_remote_data(".")
with pytest.raises(TypeError):
r.run_tests(remote_data="bob")
def test_wrong_kwarg():
r = _TestRunner(".")
with pytest.raises(TypeError):
r.run_tests(spam="eggs")
def test_invalid_kwarg():
class bad_return(_TestRunnerBase):
@keyword()
def remote_data(self, remote_data, kwargs):
return "bob"
r = bad_return(".")
with pytest.raises(TypeError):
r.run_tests(remote_data="bob")
def test_new_kwarg():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
return [spam]
r = Spam(".")
args = r._generate_args(spam="spam")
assert ["spam"] == args
def test_priority():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
return [spam]
@keyword(priority=1)
def eggs(self, eggs, kwargs):
return [eggs]
r = Spam(".")
args = r._generate_args(spam="spam", eggs="eggs")
assert ["eggs", "spam"] == args
def test_docs():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
"""
Spam Spam Spam
"""
return [spam]
@keyword()
def eggs(self, eggs, kwargs):
"""
eggs asldjasljd
"""
return [eggs]
r = Spam(".")
assert "eggs" in r.run_tests.__doc__
assert "Spam Spam Spam" in r.run_tests.__doc__
|
40f8bfb3a178da0774af9bdefd38c550917bdc80ef4ffb13e3e07aa17974c6e1 | import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
def test_assert_quantity_allclose():
assert_quantity_allclose([1, 2], [1, 2])
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm)
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm)
with pytest.raises(AssertionError, match=r"\nNot equal to tolerance"):
assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm)
with pytest.raises(AssertionError):
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm)
with pytest.raises(
u.UnitsError,
match=r"Units for 'desired' \(\) and 'actual' \(m\) are not convertible",
):
assert_quantity_allclose([1, 2] * u.m, [100, 200])
with pytest.raises(
u.UnitsError,
match=r"Units for 'desired' \(cm\) and 'actual' \(\) are not convertible",
):
assert_quantity_allclose([1, 2], [100, 200] * u.cm)
with pytest.raises(
u.UnitsError,
match=r"Units for 'atol' \(\) and 'actual' \(m\) are not convertible",
):
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3)
with pytest.raises(
u.UnitsError,
match=r"Units for 'atol' \(m\) and 'actual' \(\) are not convertible",
):
assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m)
with pytest.raises(u.UnitsError, match=r"'rtol' should be dimensionless"):
assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m)
|
8473d623b3d9de5c0c01067c726b3e11aaf5e879bc247c64ed12d4bc19cb6d58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# test helper.run_tests function
from astropy import test as run_tests
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with pytest.raises(ValueError):
run_tests(package="fake.module")
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with pytest.raises(ValueError):
run_tests(pastebin="not_an_option")
def test_unicode_literal_conversion():
assert isinstance("ångström", str)
|
32a42fde0d336b9e3ba8de59ed92d109ba9abca35e342058feba917408215559 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
def test_imports():
"""
This just imports all modules in astropy, making sure they don't have any
dependencies that sneak through
"""
def onerror(name):
# We should raise any legitimate error that occurred, but not
# any warnings which happen to be caught because of our pytest
# settings (e.g., DeprecationWarning).
try:
raise
except Warning:
pass
for imper, nm, ispkg in pkgutil.walk_packages(
["astropy"], "astropy.", onerror=onerror
):
imper.find_spec(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert "os" not in d
assert "log" in d
assert "test" in d
assert "sys" not in d
|
1b8c12bdb6eb6e6ac9c5728423f22046b8e97c0e349d33e77b33ed85cd8d48fa | from astropy.timeseries.periodograms.base import *
from astropy.timeseries.periodograms.bls import *
from astropy.timeseries.periodograms.lombscargle import *
|
8be5d2516d8e6f16a154a265777b04d92ce06a34b3090ee286d4f1822df42d44 | import abc
import numpy as np
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.timeseries.sampled import TimeSeries
__all__ = ["BasePeriodogram"]
class BasePeriodogram:
@abc.abstractmethod
def __init__(self, t, y, dy=None):
pass
@classmethod
def from_timeseries(
cls, timeseries, signal_column_name=None, uncertainty=None, **kwargs
):
"""
Initialize a periodogram from a time series object.
If a binned time series is passed, the time at the center of the bins is
used. Also note that this method automatically gets rid of NaN/undefined
values when initializing the periodogram.
Parameters
        ----------
        timeseries : `~astropy.timeseries.TimeSeries` or `~astropy.timeseries.BinnedTimeSeries`
            The time series to read the times and signal values from.
        signal_column_name : str
The name of the column containing the signal values to use.
uncertainty : str or float or `~astropy.units.Quantity`, optional
The name of the column containing the errors on the signal, or the
value to use for the error, if a scalar.
**kwargs
Additional keyword arguments are passed to the initializer for this
periodogram class.
"""
if signal_column_name is None:
raise ValueError("signal_column_name should be set to a valid column name")
y = timeseries[signal_column_name]
keep = ~np.isnan(y)
if isinstance(uncertainty, str):
dy = timeseries[uncertainty]
keep &= ~np.isnan(dy)
dy = dy[keep]
else:
dy = uncertainty
if isinstance(timeseries, TimeSeries):
time = timeseries.time
elif isinstance(timeseries, BinnedTimeSeries):
time = timeseries.time_bin_center
else:
raise TypeError(
"Input time series should be an instance of "
"TimeSeries or BinnedTimeSeries"
)
return cls(time[keep], y[keep], dy=dy, **kwargs)
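# A minimal usage sketch (kept as a comment to avoid importing the concrete
# periodogram classes here): LombScargle and BoxLeastSquares both inherit
# ``from_timeseries``.
#
# from astropy import units as u
# from astropy.timeseries import TimeSeries, LombScargle
#
# ts = TimeSeries(time_start="2021-01-01T00:00:00", time_delta=3 * u.s,
#                 data={"flux": [1.0, 1.2, 0.9, 1.1]})
# ls = LombScargle.from_timeseries(ts, signal_column_name="flux")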
|
2ae93f74f8f3102a027828f44c8e7c44d119f8bfd0a8ecef34e2ae01daa4b6df | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy.io import fits, registry
from astropy.table import MaskedColumn, Table
from astropy.time import Time, TimeDelta
from astropy.timeseries.sampled import TimeSeries
__all__ = ["kepler_fits_reader"]
def kepler_fits_reader(filename):
"""
This serves as the FITS reader for KEPLER or TESS files within
astropy-timeseries.
This function should generally not be called directly, and instead this
time series reader should be accessed with the
:meth:`~astropy.timeseries.TimeSeries.read` method::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('kplr33122.fits', format='kepler.fits') # doctest: +SKIP
Parameters
----------
filename : `str` or `pathlib.Path`
File to load.
Returns
-------
ts : `~astropy.timeseries.TimeSeries`
Data converted into a TimeSeries.
"""
hdulist = fits.open(filename)
# Get the lightcurve HDU
telescope = hdulist[0].header["telescop"].lower()
if telescope == "tess":
hdu = hdulist["LIGHTCURVE"]
elif telescope == "kepler":
hdu = hdulist[1]
else:
raise NotImplementedError(
f"{hdulist[0].header['telescop']} is not implemented, only KEPLER or TESS"
" are supported through this reader"
)
if hdu.header["EXTVER"] > 1:
raise NotImplementedError(
f"Support for {hdu.header['TELESCOP']} v{hdu.header['EXTVER']} files not"
" yet implemented"
)
# Check time scale
if hdu.header["TIMESYS"] != "TDB":
raise NotImplementedError(
f"Support for {hdu.header['TIMESYS']} time scale not yet implemented in"
f" {hdu.header['TELESCOP']} reader"
)
tab = Table.read(hdu, format="fits")
# Some KEPLER files have a T column instead of TIME.
if "T" in tab.colnames:
tab.rename_column("T", "TIME")
for colname in tab.colnames:
unit = tab[colname].unit
# Make masks nan for any column which will turn into a Quantity
# later. TODO: remove once we support Masked Quantities properly?
if unit and isinstance(tab[colname], MaskedColumn):
tab[colname] = tab[colname].filled(np.nan)
# Fix units
if unit == "e-/s":
tab[colname].unit = "electron/s"
if unit == "pixels":
tab[colname].unit = "pixel"
# Rename columns to lowercase
tab.rename_column(colname, colname.lower())
# Filter out NaN rows
nans = np.isnan(tab["time"].data)
if np.any(nans):
warnings.warn(f"Ignoring {np.sum(nans)} rows with NaN times")
tab = tab[~nans]
# Time column is dependent on source and we correct it here
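    # BJDREFI and BJDREFF hold the integer and fractional parts of the
    # reference Barycentric Julian Date; the TIME column stores offsets in
    # days from that reference.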
reference_date = Time(
hdu.header["BJDREFI"],
hdu.header["BJDREFF"],
scale=hdu.header["TIMESYS"].lower(),
format="jd",
)
time = reference_date + TimeDelta(tab["time"].data)
time.format = "isot"
# Remove original time column
tab.remove_column("time")
hdulist.close()
return TimeSeries(time=time, data=tab)
registry.register_reader("kepler.fits", TimeSeries, kepler_fits_reader)
registry.register_reader("tess.fits", TimeSeries, kepler_fits_reader)
|
76fa9c76f5b0bb600c45053c1c0d1aa401fa51d8788b57ed6b601cf7ff44981d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal
from astropy import units as u
from astropy.table import QTable, Table, join, vstack
from astropy.time import Time
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.timeseries.sampled import TimeSeries
INPUT_TIME = Time(["2016-03-22T12:30:31", "2015-01-21T12:30:32", "2016-03-22T12:30:40"])
PLAIN_TABLE = Table(
[[1.0, 2.0, 11.0], [3, 4, 1], ["x", "y", "z"]], names=["a", "b", "c"]
)
class CommonTimeSeriesTests:
def test_stacking(self):
ts = vstack([self.series, self.series])
assert isinstance(ts, self.series.__class__)
def test_row_slicing(self):
ts = self.series[:2]
assert isinstance(ts, self.series.__class__)
    def test_row_indexing(self):
        # These equality checks previously lacked ``assert`` and never
        # verified anything; rows keep their insertion order, so row 0
        # holds the first input time.
        assert self.series[0][self.time_attr] == Time("2016-03-22T12:30:31")
        assert self.series[self.time_attr][0] == Time("2016-03-22T12:30:31")
def test_column_indexing(self):
assert_equal(self.series["a"], [1, 2, 11])
def test_column_slicing_notime(self):
tab = self.series["a", "b"]
assert not isinstance(tab, self.series.__class__)
assert isinstance(tab, QTable)
def test_add_column(self):
self.series["d"] = [1, 2, 3]
def test_add_row(self):
self.series.add_row(self._row)
def test_set_unit(self):
self.series["d"] = [1, 2, 3]
self.series["d"].unit = "s"
def test_replace_column(self):
self.series.replace_column("c", [1, 3, 4])
def test_required_after_stacking(self):
# When stacking, we have to temporarily relax the checking of the
# columns in the time series, but we need to make sure that the
# checking works again afterwards
ts = vstack([self.series, self.series])
with pytest.raises(ValueError, match=r"TimeSeries object is invalid"):
ts.remove_columns(ts.colnames)
def test_join(self):
ts_other = self.series.copy()
ts_other.add_row(self._row)
ts_other["d"] = [11, 22, 33, 44]
ts_other.remove_columns(["a", "b"])
ts = join(self.series, ts_other)
assert len(ts) == len(self.series)
ts = join(self.series, ts_other, join_type="outer")
assert len(ts) == len(ts_other)
class TestTimeSeries(CommonTimeSeriesTests):
_row = {"time": "2016-03-23T12:30:40", "a": 1.0, "b": 2, "c": "a"}
def setup_method(self, method):
self.series = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
self.time_attr = "time"
def test_column_slicing(self):
ts = self.series["time", "a"]
assert isinstance(ts, TimeSeries)
class TestBinnedTimeSeries(CommonTimeSeriesTests):
_row = {
"time_bin_start": "2016-03-23T12:30:40",
"time_bin_size": 2 * u.s,
"a": 1.0,
"b": 2,
"c": "a",
}
def setup_method(self, method):
self.series = BinnedTimeSeries(
time_bin_start=INPUT_TIME, time_bin_size=3 * u.s, data=PLAIN_TABLE
)
self.time_attr = "time_bin_start"
def test_column_slicing(self):
ts = self.series["time_bin_start", "time_bin_size", "a"]
assert isinstance(ts, BinnedTimeSeries)
|
dbb0b6dd7140e0fd948e313053ce3807f4f934d40c44dc58b07d8326547210a9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import numpy as np
import pytest
from numpy.testing import assert_equal
from astropy import units as u
from astropy.time import Time
from astropy.timeseries.downsample import aggregate_downsample, reduceat
from astropy.timeseries.sampled import TimeSeries
from astropy.utils.exceptions import AstropyUserWarning
INPUT_TIME = Time(
[
"2016-03-22T12:30:31",
"2016-03-22T12:30:32",
"2016-03-22T12:30:33",
"2016-03-22T12:30:34",
"2016-03-22T12:30:35",
]
)
def test_reduceat():
add_output = np.add.reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7])
# Similar to np.add for an array input.
sum_output = reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7], np.sum)
assert_equal(sum_output, add_output)
mean_output = reduceat(np.arange(8), np.arange(8)[::2], np.mean)
assert_equal(mean_output, np.array([0.5, 2.5, 4.5, 6.5]))
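    # With non-monotonic indices, reduceat (like np.ufunc.reduceat) reduces
    # arr[idx[i]:idx[i + 1]] when idx[i] < idx[i + 1] and returns the single
    # element arr[idx[i]] otherwise, hence the interleaved means and values.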
    mean_nonmono_output = reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7], np.mean)
    assert_equal(mean_nonmono_output, np.array([1.5, 4, 2.5, 5, 3.5, 6, 4.5, 7.0]))
assert_equal(
reduceat(np.arange(8), np.arange(8)[::2], np.mean),
reduceat(np.arange(8), np.arange(8)[::2], np.nanmean),
)
def test_timeseries_invalid():
with pytest.raises(TypeError, match="time_series should be a TimeSeries"):
aggregate_downsample(None)
def test_time_bin_invalid():
# Make sure to raise the right exception when time_bin_* is passed incorrectly.
with pytest.raises(
TypeError, match=r"'time_bin_size' should be a Quantity or a TimeDelta"
):
aggregate_downsample(TimeSeries(), time_bin_size=1)
def test_binning_arg_invalid():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=["a"])
with pytest.raises(
TypeError,
match=r"With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided",
):
aggregate_downsample(ts)
def test_time_bin_conversion():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=["a"])
# Make sure time_bin_start and time_bin_end are properly converted to Time
down_start = aggregate_downsample(
ts, time_bin_start=["2016-03-22T12:30:31"], time_bin_size=[1] * u.s
)
assert_equal(down_start.time_bin_start.isot, ["2016-03-22T12:30:31.000"])
down_end = aggregate_downsample(
ts,
time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:33"],
time_bin_end="2016-03-22T12:30:34",
)
assert_equal(
down_end.time_bin_end.isot,
["2016-03-22T12:30:33.000", "2016-03-22T12:30:34.000"],
)
def test_time_bin_end_auto():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=["a"])
# Interpret `time_bin_end` as the end of timeseries when `time_bin_start` is
# an array and `time_bin_size` is not provided
down_auto_end = aggregate_downsample(
ts, time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:33"]
)
assert_equal(
down_auto_end.time_bin_end.isot,
["2016-03-22T12:30:33.000", "2016-03-22T12:30:35.000"],
)
def test_time_bin_start_array():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=["a"])
# When `time_bin_end` is an array and `time_bin_start` is not provided, `time_bin_start` is converted
# to an array with its first element set to the start of the timeseries and rest populated using
# `time_bin_end`. This case is separately tested since `BinnedTimeSeries` allows `time_bin_end` to
# be an array only if `time_bin_start` is an array.
down_start_array = aggregate_downsample(
ts, time_bin_end=["2016-03-22T12:30:33", "2016-03-22T12:30:35"]
)
assert_equal(
down_start_array.time_bin_start.isot,
["2016-03-22T12:30:31.000", "2016-03-22T12:30:33.000"],
)
def test_nbins():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=["a"])
# n_bins should default to the number needed to fit all the original points
down_nbins = aggregate_downsample(ts, n_bins=2)
assert_equal(
down_nbins.time_bin_start.isot,
["2016-03-22T12:30:31.000", "2016-03-22T12:30:33.000"],
)
# Regression test for #12527: ignore `n_bins` if `time_bin_start` is an array
n_times = len(INPUT_TIME)
for n_bins in [0, n_times - 1, n_times, n_times + 1]:
down_nbins = aggregate_downsample(ts, time_bin_start=INPUT_TIME, n_bins=n_bins)
assert len(down_nbins) == n_times
def test_downsample():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=["a"])
ts_units = TimeSeries(
time=INPUT_TIME, data=[[1, 2, 3, 4, 5] * u.count], names=["a"]
)
# Avoid precision problems with floating-point comparisons on 32bit
if sys.maxsize > 2**32:
# 64 bit
time_bin_incr = 1 * u.s
time_bin_start = None
else:
# 32 bit
time_bin_incr = (1 - 1e-6) * u.s
time_bin_start = ts.time[0] - 1 * u.ns
down_1 = aggregate_downsample(
ts, time_bin_size=time_bin_incr, time_bin_start=time_bin_start
)
u.isclose(down_1.time_bin_size, [1, 1, 1, 1, 1] * time_bin_incr)
assert_equal(
down_1.time_bin_start.isot,
Time(
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:32.000",
"2016-03-22T12:30:33.000",
"2016-03-22T12:30:34.000",
"2016-03-22T12:30:35.000",
]
),
)
assert_equal(down_1["a"].data.data, np.array([1, 2, 3, 4, 5]))
down_2 = aggregate_downsample(
ts, time_bin_size=2 * time_bin_incr, time_bin_start=time_bin_start
)
u.isclose(down_2.time_bin_size, [2, 2, 2] * time_bin_incr)
assert_equal(
down_2.time_bin_start.isot,
Time(
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:33.000",
"2016-03-22T12:30:35.000",
]
),
)
assert_equal(down_2["a"].data.data, np.array([1, 3, 5]))
down_3 = aggregate_downsample(
ts, time_bin_size=3 * time_bin_incr, time_bin_start=time_bin_start
)
u.isclose(down_3.time_bin_size, [3, 3] * time_bin_incr)
assert_equal(
down_3.time_bin_start.isot,
Time(["2016-03-22T12:30:31.000", "2016-03-22T12:30:34.000"]),
)
assert_equal(down_3["a"].data.data, np.array([2, 4]))
down_4 = aggregate_downsample(
ts, time_bin_size=4 * time_bin_incr, time_bin_start=time_bin_start
)
u.isclose(down_4.time_bin_size, [4, 4] * time_bin_incr)
assert_equal(
down_4.time_bin_start.isot,
Time(["2016-03-22T12:30:31.000", "2016-03-22T12:30:35.000"]),
)
assert_equal(down_4["a"].data.data, np.array([2, 5]))
down_units = aggregate_downsample(
ts_units, time_bin_size=4 * time_bin_incr, time_bin_start=time_bin_start
)
u.isclose(down_units.time_bin_size, [4, 4] * time_bin_incr)
assert_equal(
down_units.time_bin_start.isot,
Time(["2016-03-22T12:30:31.000", "2016-03-22T12:30:35.000"]),
)
assert down_units["a"].unit.name == "ct"
assert_equal(down_units["a"].data, np.array([2.5, 5.0]))
# Contiguous bins with uneven bin sizes: `time_bin_size` is an array
down_uneven_bins = aggregate_downsample(
ts, time_bin_size=[2, 1, 1] * time_bin_incr, time_bin_start=time_bin_start
)
u.isclose(down_uneven_bins.time_bin_size, [2, 1, 1] * time_bin_incr)
assert_equal(
down_uneven_bins.time_bin_start.isot,
Time(
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:33.000",
"2016-03-22T12:30:34.000",
]
),
)
assert_equal(down_uneven_bins["a"].data.data, np.array([1, 3, 4]))
# Uncontiguous bins with even bin sizes: `time_bin_start` and `time_bin_end` are both arrays
down_time_array = aggregate_downsample(
ts,
time_bin_start=Time(["2016-03-22T12:30:31.000", "2016-03-22T12:30:34.000"]),
time_bin_end=Time(["2016-03-22T12:30:32.000", "2016-03-22T12:30:35.000"]),
)
u.isclose(down_time_array.time_bin_size, [1, 1] * u.second)
assert_equal(
down_time_array.time_bin_start.isot,
Time(["2016-03-22T12:30:31.000", "2016-03-22T12:30:34.000"]),
)
assert_equal(down_time_array["a"].data.data, np.array([1, 4]))
# Overlapping bins
with pytest.warns(
AstropyUserWarning,
match=(
"Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning."
),
):
down_overlap_bins = aggregate_downsample(
ts,
time_bin_start=Time(["2016-03-22T12:30:31.000", "2016-03-22T12:30:33.000"]),
time_bin_end=Time(["2016-03-22T12:30:34", "2016-03-22T12:30:36.000"]),
)
assert_equal(down_overlap_bins["a"].data, np.array([2, 5]))
@pytest.mark.parametrize(
"time, time_bin_start, time_bin_end",
[
(INPUT_TIME[:2], INPUT_TIME[2:], None),
(INPUT_TIME[3:], INPUT_TIME[:2], INPUT_TIME[1:3]),
(INPUT_TIME[[0]], INPUT_TIME[:2], None),
(INPUT_TIME[[0]], INPUT_TIME[::2], None),
],
)
def test_downsample_edge_cases(time, time_bin_start, time_bin_end):
"""Regression test for #12527: allow downsampling even if all bins fall
before or beyond the time span of the data."""
ts = TimeSeries(time=time, data=[np.ones(len(time))], names=["a"])
down = aggregate_downsample(
ts, time_bin_start=time_bin_start, time_bin_end=time_bin_end
)
assert len(down) == len(time_bin_start)
assert all(down["time_bin_size"] >= 0) # bin lengths shall never be negative
if ts.time.min() < time_bin_start[0] or time_bin_end is not None:
assert down[
"a"
].mask.all() # all bins placed *beyond* the time span of the data
elif ts.time.min() < time_bin_start[1]:
assert (
down["a"][0] == ts["a"][0]
) # single-valued time series falls in *first* bin
@pytest.mark.parametrize(
"diff_from_base", [1 * u.year, 10 * u.year, 50 * u.year, 100 * u.year]
)
def test_time_precision_limit(diff_from_base):
"""
    A test of the time precision limit supported by downsample().
    It relates to an implementation detail: time comparison (and, indirectly,
    sorting) is done with relative times for computational efficiency.
    The converted relative time loses a little precision, and the loss worsens
    as the gap between a time and the base time increases, e.g., when downsampling
    a time series that combines current observations with archival data from
    years back. This test documents the acceptable precision limit.
    See also: https://github.com/astropy/astropy/pull/13069#issuecomment-1093069184
"""
precision_limit = 500 * u.ns
from astropy.timeseries.downsample import _to_relative_longdouble
t_base = Time("1980-01-01T12:30:31.000", format="isot", scale="tdb")
t2 = t_base + diff_from_base
t3 = t2 + precision_limit
r_t2 = _to_relative_longdouble(t2, t_base)
r_t3 = _to_relative_longdouble(t3, t_base)
# ensure in the converted relative time,
# t2 and t3 can still be correctly compared
assert r_t3 > r_t2
|
56699bafa0abd5c7f208174199ac7a347d2982c14e22b842b4416e5ad4a82d54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.table import Column, Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.sampled import TimeSeries
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
INPUT_TIME = Time(["2016-03-22T12:30:31", "2015-01-21T12:30:32", "2016-03-22T12:30:40"])
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=["a", "b", "c"])
CSV_FILE = get_pkg_data_filename("data/sampled.csv")
def test_empty_initialization():
ts = TimeSeries()
ts["time"] = Time([50001, 50002, 50003], format="mjd")
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = TimeSeries()
with pytest.raises(
ValueError,
match=(
r"TimeSeries object is invalid - expected 'time' as the first column but"
r" found 'flux'"
),
):
ts["flux"] = [1, 2, 3]
def test_initialize_only_time():
ts = TimeSeries(time=INPUT_TIME)
assert ts["time"] is ts.time
# NOTE: the object in the table is a copy
assert_equal(ts.time.isot, INPUT_TIME.isot)
def test_initialization_with_data():
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert_equal(ts["a"], [10, 2, 3])
assert_equal(ts["b"], [4, 5, 6])
def test_initialize_only_data():
with pytest.raises(
TypeError, match=r"Either 'time' or 'time_start' should be specified"
):
TimeSeries(data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
def test_initialization_with_table():
ts = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
assert ts.colnames == ["time", "a", "b", "c"]
def test_initialization_with_time_delta():
ts = TimeSeries(
time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=TimeDelta(3, format="sec"),
data=[[10, 2, 3], [4, 5, 6]],
names=["a", "b"],
)
assert_equal(
ts.time.isot,
[
"2018-07-01T10:10:10.000",
"2018-07-01T10:10:13.000",
"2018-07-01T10:10:16.000",
],
)
def test_initialization_missing_time_delta():
with pytest.raises(
TypeError, match=r"'time' is scalar, so 'time_delta' is required"
):
TimeSeries(
time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]],
names=["a", "b"],
)
def test_initialization_invalid_time_and_time_start():
with pytest.raises(TypeError, match=r"Cannot specify both 'time' and 'time_start'"):
TimeSeries(
time=INPUT_TIME,
time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]],
names=["a", "b"],
)
def test_initialization_invalid_time_delta():
with pytest.raises(
TypeError, match=r"'time_delta' should be a Quantity or a TimeDelta"
):
TimeSeries(
time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=[1, 4, 3],
data=[[10, 2, 3], [4, 5, 6]],
names=["a", "b"],
)
def test_initialization_with_time_in_data():
data = PLAIN_TABLE.copy()
data["time"] = INPUT_TIME
ts1 = TimeSeries(data=data)
assert set(ts1.colnames) == {"time", "a", "b", "c"}
assert all(ts1.time == INPUT_TIME)
ts2 = TimeSeries(data=[[10, 2, 3], INPUT_TIME], names=["a", "time"])
assert set(ts2.colnames) == {"time", "a"}
assert all(ts2.time == INPUT_TIME)
MESSAGE = r"'time' has been given both in the table and as a keyword argument"
with pytest.raises(TypeError, match=MESSAGE):
# Don't allow ambiguous cases of passing multiple 'time' columns
TimeSeries(data=data, time=INPUT_TIME)
with pytest.raises(TypeError, match=MESSAGE):
# 'time' is a protected name, don't allow ambiguous cases
TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], INPUT_TIME], names=["a", "time"])
def test_initialization_n_samples():
# Make sure things crash with incorrect n_samples
with pytest.raises(
TypeError,
match=(
r"'n_samples' has been given both and it is not the same length as the"
r" input data."
),
):
TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE, n_samples=1000)
def test_initialization_length_mismatch():
with pytest.raises(
ValueError, match=r"Length of 'time' \(3\) should match data length \(2\)"
):
TimeSeries(time=INPUT_TIME, data=[[10, 2], [4, 5]], names=["a", "b"])
def test_initialization_invalid_both_time_and_time_delta():
with pytest.raises(
TypeError,
match=r"'time_delta' should not be specified since 'time' is an array",
):
TimeSeries(time=INPUT_TIME, time_delta=TimeDelta(3, format="sec"))
def test_fold():
times = Time([1, 2, 3, 8, 9, 12], format="unix")
ts = TimeSeries(time=times)
ts["flux"] = [1, 4, 4, 3, 2, 3]
# Try without epoch time, as it should default to the first time and
# wrapping at half the period.
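    # For example, the third sample is 2 s after the epoch (t=1); 2 s exceeds
    # half the 3.2 s period, so it wraps to 2 - 3.2 = -1.2 s.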
tsf = ts.fold(period=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, -1.2, 0.6, -1.6, 1.4], rtol=1e-6)
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format="unix"))
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [-0.6, 0.4, 1.4, 0.0, 1.0, 0.8], rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, 2, 0.6, 1.6, 1.4], rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, -1.4, -0.4, 1.4, -0.8, -1.0], rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, 1.8, 2.8, 1.4, 2.4, 2.2], rtol=1e-6)
# Now repeat the above tests but with normalization applied
# Try without epoch time, as it should default to the first time and
# wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(
tsf.time.to_value(u.one),
[0, 1 / 3.2, -1.2 / 3.2, 0.6 / 3.2, -1.6 / 3.2, 1.4 / 3.2],
rtol=1e-6,
)
# Try with epoch time
tsf = ts.fold(
period=3.2 * u.s, epoch_time=Time(1.6, format="unix"), normalize_phase=True
)
assert isinstance(tsf.time, Quantity)
assert_allclose(
tsf.time.to_value(u.one),
[-0.6 / 3.2, 0.4 / 3.2, 1.4 / 3.2, 0.0 / 3.2, 1.0 / 3.2, 0.8 / 3.2],
rtol=1e-6,
atol=1e-6,
)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=1, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(
tsf.time.to_value(u.one),
[0, 1 / 3.2, 2 / 3.2, 0.6 / 3.2, 1.6 / 3.2, 1.4 / 3.2],
rtol=1e-6,
)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(
tsf.time.to_value(u.one),
[0.8 / 3.2, -1.4 / 3.2, -0.4 / 3.2, 1.4 / 3.2, -0.8 / 3.2, -1.0 / 3.2],
rtol=1e-6,
)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(
period=3.2 * u.s, epoch_phase=0.25, wrap_phase=1, normalize_phase=True
)
assert isinstance(tsf.time, Quantity)
assert_allclose(
tsf.time.to_value(u.one),
[0.8 / 3.2, 1.8 / 3.2, 2.8 / 3.2, 1.4 / 3.2, 2.4 / 3.2, 2.2 / 3.2],
rtol=1e-6,
)
def test_fold_invalid_options():
times = Time([1, 2, 3, 8, 9, 12], format="unix")
ts = TimeSeries(time=times)
ts["flux"] = [1, 4, 4, 3, 2, 3]
with pytest.raises(
u.UnitsError, match="period should be a Quantity in units of time"
):
ts.fold(period=3.2)
with pytest.raises(
u.UnitsError, match="period should be a Quantity in units of time"
):
ts.fold(period=3.2 * u.m)
with pytest.raises(
u.UnitsError,
match=(
"epoch_phase should be a Quantity in units of "
"time when normalize_phase=False"
),
):
ts.fold(period=3.2 * u.s, epoch_phase=0.2)
with pytest.raises(
u.UnitsError,
match=(
"epoch_phase should be a dimensionless Quantity "
"or a float when normalize_phase=True"
),
):
ts.fold(period=3.2 * u.s, epoch_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(
u.UnitsError,
match=(
"wrap_phase should be a Quantity in units of "
"time when normalize_phase=False"
),
):
ts.fold(period=3.2 * u.s, wrap_phase=0.2)
with pytest.raises(
u.UnitsError,
match="wrap_phase should be dimensionless when normalize_phase=True",
):
ts.fold(period=3.2 * u.s, wrap_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(
ValueError, match="wrap_phase should be between 0 and the period"
):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1 * u.s)
with pytest.raises(
ValueError, match="wrap_phase should be between 0 and the period"
):
ts.fold(period=3.2 * u.s, wrap_phase=-4.2 * u.s)
with pytest.raises(ValueError, match="wrap_phase should be between 0 and 1"):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1, normalize_phase=True)
with pytest.raises(ValueError, match="wrap_phase should be between 0 and 1"):
ts.fold(period=3.2 * u.s, wrap_phase=2.2, normalize_phase=True)
def test_pandas():
pandas = pytest.importorskip("pandas")
df1 = pandas.DataFrame()
df1["a"] = [1, 2, 3]
df1.set_index(pandas.DatetimeIndex(INPUT_TIME.datetime64), inplace=True)
ts = TimeSeries.from_pandas(df1)
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert ts.colnames == ["time", "a"]
assert len(ts.indices) == 1
assert (ts.indices["time"].columns[0] == INPUT_TIME).all()
ts_tcb = TimeSeries.from_pandas(df1, time_scale="tcb")
assert ts_tcb.time.scale == "tcb"
df2 = ts.to_pandas()
assert (df2.index.values == pandas.Index(INPUT_TIME.datetime64).values).all()
assert df2.columns == pandas.Index(["a"])
assert (df1["a"] == df2["a"]).all()
with pytest.raises(TypeError, match=r"Input should be a pandas DataFrame"):
TimeSeries.from_pandas(None)
df4 = pandas.DataFrame()
df4["a"] = [1, 2, 3]
with pytest.raises(TypeError, match=r"DataFrame does not have a DatetimeIndex"):
TimeSeries.from_pandas(df4)
def test_read_time_missing():
with pytest.raises(
ValueError,
match=(
r"``time_column`` should be provided since the default Table readers are"
r" being used\."
),
):
TimeSeries.read(CSV_FILE, format="csv")
def test_read_time_wrong():
with pytest.raises(
ValueError, match=r"Time column 'abc' not found in the input data\."
):
TimeSeries.read(CSV_FILE, time_column="abc", format="csv")
def test_read():
timeseries = TimeSeries.read(CSV_FILE, time_column="Date", format="csv")
assert timeseries.colnames == ["time", "A", "B", "C", "D", "E", "F", "G"]
assert len(timeseries) == 11
assert timeseries["time"].format == "iso"
assert timeseries["A"].sum() == 266.5
@pytest.mark.remote_data(source="astropy")
def test_kepler_astropy():
from astropy.units import UnitsWarning
filename = get_pkg_data_filename("timeseries/kplr010666592-2009131110544_slc.fits")
with pytest.warns(UnitsWarning):
timeseries = TimeSeries.read(filename, format="kepler.fits")
assert timeseries["time"].format == "isot"
assert timeseries["time"].scale == "tdb"
assert timeseries["sap_flux"].unit.to_string() == "electron / s"
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source="astropy")
def test_tess_astropy():
filename = get_pkg_data_filename(
"timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits"
)
with pytest.warns(UserWarning, match="Ignoring 815 rows with NaN times"):
timeseries = TimeSeries.read(filename, format="tess.fits")
assert timeseries["time"].format == "isot"
assert timeseries["time"].scale == "tdb"
assert timeseries["sap_flux"].unit.to_string() == "electron / s"
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
def test_required_columns():
# Test the machinery that makes sure that the required columns are present
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
# In the examples below, the operation (e.g. remove_column) is actually
# carried out before the checks are made, so we need to use copy() so that
# we don't change the main version of the time series.
# Make sure copy works fine
ts.copy()
MESSAGE = (
r"TimeSeries object is invalid - expected 'time' as the first column but found"
r" '{}'"
)
with pytest.raises(ValueError, match=MESSAGE.format("c")):
ts.copy().add_column(Column([3, 4, 5], name="c"), index=0)
with pytest.raises(ValueError, match=MESSAGE.format("d")):
ts.copy().add_columns(
[Column([3, 4, 5], name="d"), Column([3, 4, 5], name="e")], indexes=[0, 1]
)
with pytest.raises(ValueError, match=MESSAGE.format("a")):
ts.copy().keep_columns(["a", "b"])
with pytest.raises(ValueError, match=MESSAGE.format("a")):
ts.copy().remove_column("time")
with pytest.raises(ValueError, match=MESSAGE.format("b")):
ts.copy().remove_columns(["time", "a"])
with pytest.raises(ValueError, match=MESSAGE.format("banana")):
ts.copy().rename_column("time", "banana")
# https://github.com/astropy/astropy/issues/13009
MESSAGE = (
r"TimeSeries object is invalid - expected \['time', 'a'\] as the first columns"
r" but found \['time', 'b'\]"
)
ts_2cols_required = ts.copy()
ts_2cols_required._required_columns = ["time", "a"]
with pytest.raises(ValueError, match=MESSAGE):
ts_2cols_required.remove_column("a")
@pytest.mark.parametrize("cls", [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
p1 = cls.from_timeseries(ts, "a")
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time.jd)
assert_equal(p1.y, ts["a"])
assert p1.dy is None
p2 = cls.from_timeseries(ts, "a", uncertainty="b")
assert_quantity_allclose(p2.dy, ts["b"])
p3 = cls.from_timeseries(ts, "a", uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
b3e223ce52673a7577321981e2ca10ddee78320ee332a2c1262bee52dd536d7f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.utils.data import get_pkg_data_filename
CSV_FILE = get_pkg_data_filename("data/binned.csv")
def test_empty_initialization():
ts = BinnedTimeSeries()
ts["time_bin_start"] = Time([1, 2, 3], format="mjd")
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = BinnedTimeSeries()
with pytest.raises(
ValueError,
match=(
r"BinnedTimeSeries object is invalid - expected 'time_bin_start' as the"
r" first column but found 'flux'"
),
):
ts["flux"] = [1, 2, 3]
def test_initialization_time_bin_invalid():
# Make sure things crash when time_bin_* is passed incorrectly.
with pytest.raises(TypeError, match=r"'time_bin_start' has not been specified"):
BinnedTimeSeries(data=[[1, 4, 3]])
with pytest.raises(
TypeError, match=r"Either 'time_bin_size' or 'time_bin_end' should be specified"
):
BinnedTimeSeries(time_bin_start="2016-03-22T12:30:31", data=[[1, 4, 3]])
def test_initialization_time_bin_both():
# Make sure things crash when time_bin_* is passed twice.
MESSAGE = r"'{}' has been given both in the table and as a keyword argument"
with pytest.raises(TypeError, match=MESSAGE.format("time_bin_start")):
BinnedTimeSeries(
data={"time_bin_start": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
)
with pytest.raises(TypeError, match=MESSAGE.format("time_bin_size")):
BinnedTimeSeries(
data={"time_bin_size": ["2016-03-22T12:30:31"]}, time_bin_size=[1] * u.s
)
def test_initialization_time_bin_size():
# Make sure things crash when time_bin_size has no units
with pytest.raises(
TypeError, match=r"'time_bin_size' should be a Quantity or a TimeDelta"
):
BinnedTimeSeries(
data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=1,
)
# TimeDelta for time_bin_size
ts = BinnedTimeSeries(
data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=TimeDelta(1, format="jd"),
)
assert isinstance(ts.time_bin_size, u.quantity.Quantity)
def test_initialization_time_bin_start_scalar():
# Make sure things crash when time_bin_start is a scalar with no time_bin_size
with pytest.raises(
TypeError, match=r"'time_bin_start' is scalar, so 'time_bin_size' is required"
):
BinnedTimeSeries(
data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format="mjd"),
time_bin_end=Time(1, format="mjd"),
)
def test_initialization_n_bins_invalid_arguments():
# Make sure an exception is raised when n_bins is passed as an argument while
    # any of the parameters 'time_bin_start' or 'time_bin_size' is not scalar.
with pytest.raises(
TypeError,
match=(
r"'n_bins' cannot be specified if 'time_bin_start' or 'time_bin_size' are"
r" not scalar'"
),
):
BinnedTimeSeries(
time_bin_start=Time([1, 2, 3], format="cxcsec"),
time_bin_size=1 * u.s,
n_bins=10,
)
def test_initialization_n_bins():
# Make sure things crash with incorrect n_bins
with pytest.raises(
TypeError,
match=(
r"'n_bins' has been given and it is not the same length as the input data\."
),
):
BinnedTimeSeries(
data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format="mjd"),
time_bin_size=1 * u.s,
time_bin_end=Time(1, format="mjd"),
n_bins=10,
)
def test_initialization_non_scalar_time():
# Make sure things crash with incorrect size of time_bin_start
with pytest.raises(
ValueError,
match=r"Length of 'time_bin_start' \(2\) should match table length \(1\)",
):
BinnedTimeSeries(
data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:32"],
time_bin_size=1 * u.s,
time_bin_end=Time(1, format="mjd"),
)
with pytest.raises(
TypeError, match=r"Either 'time_bin_size' or 'time_bin_end' should be specified"
):
BinnedTimeSeries(
data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31"],
time_bin_size=None,
time_bin_end=None,
)
def test_even_contiguous():
# Initialize a ``BinnedTimeSeries`` with even contiguous bins by specifying
# the bin width:
ts = BinnedTimeSeries(
time_bin_start="2016-03-22T12:30:31", time_bin_size=3 * u.s, data=[[1, 4, 3]]
)
assert_equal(
ts.time_bin_start.isot,
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:34.000",
"2016-03-22T12:30:37.000",
],
)
assert_equal(
ts.time_bin_center.isot,
[
"2016-03-22T12:30:32.500",
"2016-03-22T12:30:35.500",
"2016-03-22T12:30:38.500",
],
)
assert_equal(
ts.time_bin_end.isot,
[
"2016-03-22T12:30:34.000",
"2016-03-22T12:30:37.000",
"2016-03-22T12:30:40.000",
],
)
def test_uneven_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven contiguous bins by giving an
# end time:
ts = BinnedTimeSeries(
time_bin_start=[
"2016-03-22T12:30:31",
"2016-03-22T12:30:32",
"2016-03-22T12:30:40",
],
time_bin_end="2016-03-22T12:30:55",
data=[[1, 4, 3]],
)
assert_equal(
ts.time_bin_start.isot,
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:32.000",
"2016-03-22T12:30:40.000",
],
)
assert_equal(
ts.time_bin_center.isot,
[
"2016-03-22T12:30:31.500",
"2016-03-22T12:30:36.000",
"2016-03-22T12:30:47.500",
],
)
assert_equal(
ts.time_bin_end.isot,
[
"2016-03-22T12:30:32.000",
"2016-03-22T12:30:40.000",
"2016-03-22T12:30:55.000",
],
)
def test_uneven_non_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins with
# lists of start times, bin sizes and data:
ts = BinnedTimeSeries(
time_bin_start=[
"2016-03-22T12:30:31",
"2016-03-22T12:30:38",
"2016-03-22T12:34:40",
],
time_bin_size=[5, 100, 2] * u.s,
data=[[1, 4, 3]],
)
assert_equal(
ts.time_bin_start.isot,
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:38.000",
"2016-03-22T12:34:40.000",
],
)
assert_equal(
ts.time_bin_center.isot,
[
"2016-03-22T12:30:33.500",
"2016-03-22T12:31:28.000",
"2016-03-22T12:34:41.000",
],
)
assert_equal(
ts.time_bin_end.isot,
[
"2016-03-22T12:30:36.000",
"2016-03-22T12:32:18.000",
"2016-03-22T12:34:42.000",
],
)
def test_uneven_non_contiguous_full():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins by
# specifying the start and end times for the bins:
ts = BinnedTimeSeries(
time_bin_start=[
"2016-03-22T12:30:31",
"2016-03-22T12:30:33",
"2016-03-22T12:30:40",
],
time_bin_end=[
"2016-03-22T12:30:32",
"2016-03-22T12:30:35",
"2016-03-22T12:30:41",
],
data=[[1, 4, 3]],
)
assert_equal(
ts.time_bin_start.isot,
[
"2016-03-22T12:30:31.000",
"2016-03-22T12:30:33.000",
"2016-03-22T12:30:40.000",
],
)
assert_equal(
ts.time_bin_center.isot,
[
"2016-03-22T12:30:31.500",
"2016-03-22T12:30:34.000",
"2016-03-22T12:30:40.500",
],
)
assert_equal(
ts.time_bin_end.isot,
[
"2016-03-22T12:30:32.000",
"2016-03-22T12:30:35.000",
"2016-03-22T12:30:41.000",
],
)
def test_read_empty():
with pytest.raises(
ValueError,
match=(
r"``time_bin_start_column`` should be provided since the default Table"
r" readers are being used\."
),
):
BinnedTimeSeries.read(CSV_FILE, format="csv")
def test_read_no_size_end():
with pytest.raises(
ValueError,
match=(
r"Either `time_bin_end_column` or `time_bin_size_column` should be"
r" provided\."
),
):
BinnedTimeSeries.read(
CSV_FILE, time_bin_start_column="time_start", format="csv"
)
def test_read_both_extra_bins():
with pytest.raises(
ValueError,
match=r"Cannot specify both `time_bin_end_column` and `time_bin_size_column`\.",
):
BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_end_column="END",
time_bin_size_column="bin_size",
format="csv",
)
def test_read_size_no_unit():
with pytest.raises(
ValueError,
match=(
r"The bin size unit should be specified as an astropy Unit using"
r" ``time_bin_size_unit``\."
),
):
BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_size_column="bin_size",
format="csv",
)
def test_read_start_time_missing():
with pytest.raises(
ValueError, match=r"Bin start time column 'abc' not found in the input data\."
):
BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="abc",
time_bin_size_column="bin_size",
time_bin_size_unit=u.second,
format="csv",
)
def test_read_end_time_missing():
with pytest.raises(
ValueError, match=r"Bin end time column 'missing' not found in the input data\."
):
BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_end_column="missing",
format="csv",
)
def test_read_size_missing():
with pytest.raises(
ValueError, match=r"Bin size column 'missing' not found in the input data\."
):
BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_size_column="missing",
time_bin_size_unit=u.second,
format="csv",
)
def test_read_time_unit_missing():
with pytest.raises(
ValueError,
match=(
r"The bin size unit should be specified as an astropy Unit using"
r" ``time_bin_size_unit``\."
),
):
BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_size_column="bin_size",
format="csv",
)
def test_read():
timeseries = BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_end_column="time_end",
format="csv",
)
assert timeseries.colnames == [
"time_bin_start",
"time_bin_size",
"bin_size",
"A",
"B",
"C",
"D",
"E",
"F",
]
assert len(timeseries) == 10
assert timeseries["B"].sum() == 1151.54
timeseries = BinnedTimeSeries.read(
CSV_FILE,
time_bin_start_column="time_start",
time_bin_size_column="bin_size",
time_bin_size_unit=u.second,
format="csv",
)
assert timeseries.colnames == [
"time_bin_start",
"time_bin_size",
"time_end",
"A",
"B",
"C",
"D",
"E",
"F",
]
assert len(timeseries) == 10
assert timeseries["B"].sum() == 1151.54
@pytest.mark.parametrize("cls", [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = BinnedTimeSeries(
time_bin_start="2016-03-22T12:30:31",
time_bin_size=3 * u.s,
data=[[1, 4, 3], [3, 4, 3]],
names=["a", "b"],
)
p1 = cls.from_timeseries(ts, "a")
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time_bin_center.jd)
assert_equal(p1.y, ts["a"])
assert p1.dy is None
p2 = cls.from_timeseries(ts, "a", uncertainty="b")
assert_quantity_allclose(p2.dy, ts["b"])
p3 = cls.from_timeseries(ts, "a", uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
e4a45b6042203909d8f583238558afcca0ca4c968af1d419f91e7ea6c8ddc964 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["bls_fast", "bls_slow"]
from functools import partial
import numpy as np
from ._impl import bls_impl
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using a brute force reference method
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
f = partial(_bls_slow_one, t, y, ivar, duration, oversample, use_likelihood)
return _apply(f, period)
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using an optimized Cython implementation
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
return bls_impl(t, y, ivar, period, duration, oversample, use_likelihood)
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
"""A private function to compute the brute force periodogram result"""
best = (-np.inf, None)
hp = 0.5 * period
min_t = np.min(t)
for dur in duration:
# Compute the phase grid (this is set by the duration and oversample).
d_phase = dur / oversample
phase = np.arange(0, period + d_phase, d_phase)
for t0 in phase:
# Figure out which data points are in and out of transit.
m_in = np.abs((t - min_t - t0 + hp) % period - hp) < 0.5 * dur
m_out = ~m_in
# Compute the estimates of the in and out-of-transit flux.
ivar_in = np.sum(ivar[m_in])
ivar_out = np.sum(ivar[m_out])
y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out
# Use this to compute the best fit depth and uncertainty.
depth = y_out - y_in
depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
snr = depth / depth_err
# Compute the log likelihood of this model.
loglike = -0.5 * np.sum((y_in - y[m_in]) ** 2 * ivar[m_in])
loglike += 0.5 * np.sum((y_out - y[m_in]) ** 2 * ivar[m_in])
# Choose which objective should be used for the optimization.
if use_likelihood:
objective = loglike
else:
objective = snr
# If this model is better than any before, keep it.
if depth > 0 and objective > best[0]:
best = (
objective,
(
objective,
depth,
depth_err,
dur,
(t0 + min_t) % period,
snr,
loglike,
),
)
return best[1]
def _apply(f, period):
return tuple(map(np.array, zip(*map(f, period))))
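# A minimal usage sketch (hypothetical values, not part of this module):
# both implementations expect plain float arrays and return the tuple of
# arrays documented above.
#
#     rng = np.random.default_rng(0)
#     t = np.sort(100 * rng.random(200))
#     y = np.ones_like(t)
#     ivar = np.ones_like(t)
#     results = bls_slow(t, y, ivar, np.array([2.0]), np.array([0.1]), 10, False)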
|
86b1ebb67e31fbea30e1ca84b8c6c184c37a8907423331397ce0b86ecdc7e8fc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from astropy import units
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.base import BasePeriodogram
from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units
from . import methods
def validate_unit_consistency(reference_object, input_object):
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
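# A behavioral sketch of the helper above (assumed values): the input is
# coerced to the reference's unit, or stripped to a plain value when the
# reference is unitless.
#
#     validate_unit_consistency(1.0 * u.day, 24 * u.hour)  # -> <Quantity 1. d>
#     validate_unit_consistency(np.arange(3), 2.0)         # -> 2.0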
class BoxLeastSquares(BasePeriodogram):
"""Compute the box least squares periodogram
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times ``t``.
dy : float, array-like, or `~astropy.units.Quantity`, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.default_rng(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
    >>> y[np.abs((t + 1.0) % 2.0 - 1) < 0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.standard_normal(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
2.000412388152837
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01723948, 0.0643028 , 0.1338783 , 0.09428816, 0.03577543])
    If the inputs are astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to("day")
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, (Time, TimeDelta)):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
def autoperiod(
self,
duration,
minimum_period=None,
maximum_period=None,
minimum_n_transit=3,
frequency_factor=1.0,
):
"""Determine a suitable grid of periods
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
        some users' needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
minimum_n_transit : int, optional
            If ``maximum_period`` is not provided, this is used to compute the
            maximum period to search by asserting that any systems with at
            least ``minimum_n_transit`` transits will be within the range of
            searched periods. Note that this is not the same as requiring that
            ``minimum_n_transit`` transits be detected. The default value is
            ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
            maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)
        ensuring that any systems with at least ``minimum_n_transit`` transits
        are within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
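        As a rough numerical sketch (assumed values, not from the data): for a
        10 day baseline, a single 0.2 day duration, and the defaults
        ``minimum_n_transit=3`` and ``frequency_factor=1.0``:
        .. code-block:: python
            baseline = 10.0                      # max(t) - min(t), in days
            df = 1.0 * 0.2 / baseline**2         # frequency spacing, 0.002 per day
            minimum_period = 2.0 * 0.2           # twice the maximum duration
            maximum_period = baseline / (3 - 1)  # 5.0 days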
"""
duration = self._validate_duration(duration)
baseline = strip_units(self._trel.max() - self._trel.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit - 1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0 / strip_units(maximum_period)
maximum_frequency = 1.0 / strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
return 1.0 / (maximum_frequency - df * np.arange(nf)) * self._t_unit()
def autopower(
self,
duration,
objective=None,
method=None,
oversample=10,
minimum_n_transit=3,
minimum_period=None,
maximum_period=None,
frequency_factor=1.0,
):
"""Compute the periodogram at set of heuristically determined periods
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(
duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor,
)
return self.power(
period, duration, objective=objective, method=method, oversample=oversample
)
def power(self, period, duration, objective=None, method=None, oversample=10):
"""Compute the periodogram for a set of periods
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
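        A sketch of a typical call (assuming ``model`` is the
        ``BoxLeastSquares`` instance from the class-level example):
        .. code-block:: python
            periods = np.linspace(1.9, 2.1, 5)
            results = model.power(periods, 0.16, objective="snr", method="slow")
            results.power  # signal-to-noise at each trial period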
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
        except (TypeError, ValueError):
raise ValueError(f"oversample must be an int, got {oversample}")
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(
f"Unrecognized method '{objective}'\n"
f"allowed methods are: {allowed_objectives}"
)
use_likelihood = objective == "likelihood"
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(
f"Unrecognized method '{method}'\n"
f"allowed methods are: {allowed_methods}"
)
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
t_ref = np.min(t)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period), dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration), dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t - t_ref,
y - np.median(y),
ivar,
period_fmt,
duration,
oversample,
use_likelihood,
)
return self._format_results(t_ref, objective, period, results)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to("day")
if self._tstart is None:
if isinstance(times, Time):
raise TypeError(
f"{name} was provided as an absolute time but "
"the BoxLeastSquares class was initialized "
"with relative times."
)
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError(
f"{name} was provided as a relative time but "
"the BoxLeastSquares class was initialized "
"with absolute times."
)
times = validate_unit_consistency(self._trel, times)
return times
def _as_absolute_time_if_needed(self, name, times):
"""
Convert the provided times to absolute times using the current _tstart
value, if needed.
"""
if self._tstart is not None:
# Some time formats/scales can't represent dates/times too far
# off from the present, so we need to mask values offset by
# more than 100,000 yr (the periodogram algorithm can return
            # transit times of e.g. 1e300 for some periods).
reset = np.abs(times.to_value(u.year)) > 100000
times[reset] = 0
times = self._tstart + times
times[reset] = np.nan
return times
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
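        A sketch of a typical call (hypothetical best-fit values, assuming
        ``model`` was constructed from unitless times):
        .. code-block:: python
            t_model = np.linspace(0, 10, 1000)
            y_model = model.model(t_model, period=2.0, duration=0.16,
                                  transit_time=1.0)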
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
t_model = strip_units(self._as_relative_time("t_model", t_model))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# Compute the depth
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model - transit_time + hp) % period - hp) < 0.5 * duration
y_model[m_model] = y_in
return y_model * self._y_unit()
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
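        A sketch of a typical call (hypothetical best-fit values):
        .. code-block:: python
            stats = model.compute_stats(period=2.0, duration=0.16,
                                        transit_time=1.0)
            stats["depth"]  # -> (depth, uncertainty) of the fiducial model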
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
        # This is a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
m_odd = np.abs((t - transit_time) % (2 * period) - period) < 0.5 * duration
m_even = (
np.abs((t - transit_time + period) % (2 * period) - period) < 0.5 * duration
)
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t - transit_time) % period - hp) < 0.5 * duration
depth_phase = _compute_depth(m_phase, *_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = (
np.abs((t - transit_time + 0.25 * period) % (0.5 * period) - 0.25 * period)
< 0.5 * duration
)
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in] - transit_time) / period).astype(int)
transit_times = (
period * np.arange(transit_id.min(), transit_id.max() + 1) + transit_time
)
unique_ids, unique_counts = np.unique(transit_id, return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in) ** 2 - (y[m_in] - y_out) ** 2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5 * np.sum(ivar[m_in] * (y[m_in] - y_in) ** 2)
full_ll -= 0.5 * np.sum(ivar[m_out] * (y[m_out] - y_out) ** 2)
# Compute the log likelihood of a sine model
A = np.vstack(
(
np.sin(2 * np.pi * t / period),
np.cos(2 * np.pi * t / period),
np.ones_like(t),
)
).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]), np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5 * np.sum((y - mod) ** 2 * ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed(
"transit_times", transit_times * self._t_unit()
),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2] ** 2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
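        A sketch of a typical call (hypothetical best-fit values, assuming
        ``t`` and ``model`` from the class-level example):
        .. code-block:: python
            in_transit = model.transit_mask(t, period=2.0, duration=0.16,
                                            transit_time=1.0)
            t_out, y_out = t[~in_transit], y[~in_transit]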
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
t = strip_units(self._as_relative_time("t", t))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
hp = 0.5 * period
return np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times t.
dy : float, array-like, or `~astropy.units.Quantity`
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
duration : array-like or `~astropy.units.Quantity`
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self._trel, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations
Parameters
----------
period : float, array-like, or `~astropy.units.Quantity` ['time']
The set of test periods.
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
Returns
-------
period, duration : array-like or `~astropy.units.Quantity` ['time']
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self._trel, period)
if not np.min(period) > np.max(duration):
raise ValueError(
"The maximum transit duration must be shorter than the minimum period"
)
return period, duration
def _format_results(self, t_ref, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
t_ref : float
The minimum time in the time series (a reference time).
objective : str
The name of the objective used in the optimization.
period : array-like or `~astropy.units.Quantity` ['time']
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(
power,
depth,
depth_err,
duration,
transit_time,
depth_snr,
log_likelihood,
) = results
transit_time += t_ref
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed(
"transit_time", transit_time
)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective,
period,
power,
depth,
depth_err,
duration,
transit_time,
depth_snr,
log_likelihood,
)
def _t_unit(self):
if has_units(self._trel):
return self._trel.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search
Attributes
----------
objective : str
The scalar used to optimize to find the best fit phase, duration, and
depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity` ['time']
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity` ['time']
The maximum power duration at each period.
transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
"""
def __init__(self, *args):
super().__init__(
zip(
(
"objective",
"period",
"power",
"depth",
"depth_err",
"duration",
"transit_time",
"depth_snr",
"log_likelihood",
),
args,
)
)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return "\n".join(
[k.rjust(m) + ": " + repr(v) for k, v in sorted(self.items())]
)
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
|
88d9d624a25f88d64f187a074830cad1f0c48e018383faf2f48e2bf0ba97b89e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
import numpy
from setuptools import Extension
BLS_ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
ext = Extension(
"astropy.timeseries.periodograms.bls._impl",
sources=[
join(BLS_ROOT, "bls.c"),
join(BLS_ROOT, "_impl.pyx"),
],
include_dirs=[numpy.get_include()],
)
return [ext]
|
e3554d34d18d4a04577f5535a37fef0b27cc57e7cc0af414c947a10bdaa38143 | """
Utilities for computing periodogram statistics.
This is an internal module; users should access this functionality via the
``false_alarm_probability`` and ``false_alarm_level`` methods of the
``astropy.timeseries.LombScargle`` API.
"""
from functools import wraps
import numpy as np
from astropy import units as u
def _weighted_sum(val, dy):
if dy is not None:
return (val / dy**2).sum()
else:
return val.sum()
def _weighted_mean(val, dy):
if dy is None:
return val.mean()
else:
return _weighted_sum(val, dy) / _weighted_sum(np.ones(val.shape), dy)
def _weighted_var(val, dy):
return _weighted_mean(val**2, dy) - _weighted_mean(val, dy) ** 2
def _gamma(N):
from scipy.special import gammaln
# Note: this is closely approximated by (1 - 0.75 / N) for large N
return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2))
def vectorize_first_argument(func):
@wraps(func)
def new_func(x, *args, **kwargs):
x = np.asarray(x)
return np.array([func(xi, *args, **kwargs) for xi in x.flat]).reshape(x.shape)
return new_func
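# A behavioral sketch (hypothetical function, not part of this module): the
# decorator lets a scalar-only routine, e.g. one wrapping a scipy root
# finder, accept array input of any shape.
#
#     @vectorize_first_argument
#     def scalar_only(x, offset):
#         return float(x) + offset
#
#     scalar_only(np.array([[1.0, 2.0]]), 10)  # -> array([[11., 12.]])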
def pdf_single(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
pdf : np.ndarray
The expected probability density function.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == "psd":
return np.exp(-z)
elif normalization == "standard":
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == "model":
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == "log":
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
def fap_single(z, N, normalization, dH=1, dK=3):
"""Single-frequency false alarm probability for the Lomb-Scargle periodogram
This is equal to 1 - cdf, where cdf is the cumulative distribution.
The single-frequency false alarm probability should not be confused with
the false alarm probability for the largest peak.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
false_alarm_probability : np.ndarray
The single-frequency false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == "psd":
return np.exp(-z)
elif normalization == "standard":
return (1 - z) ** (0.5 * Nk)
elif normalization == "model":
return (1 + z) ** (-0.5 * Nk)
elif normalization == "log":
return np.exp(-0.5 * Nk * z)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
def inv_fap_single(fap, N, normalization, dH=1, dK=3):
"""Single-frequency inverse false alarm probability
This function computes the periodogram value associated with the specified
single-frequency false alarm probability. This should not be confused with
the false alarm level of the largest peak.
Parameters
----------
fap : array-like
The false alarm probability.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
z : np.ndarray
The periodogram power corresponding to the single-peak false alarm
probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
fap = np.asarray(fap)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
# No warnings for fap = 0; rather, just let it give the right infinity.
with np.errstate(divide="ignore"):
if normalization == "psd":
return -np.log(fap)
elif normalization == "standard":
return 1 - fap ** (2 / Nk)
elif normalization == "model":
return -1 + fap ** (-2 / Nk)
elif normalization == "log":
return -2 / Nk * np.log(fap)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK)
def tau_davies(Z, fmax, t, y, dy, normalization="standard", dH=1, dK=3):
"""tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
N = len(t)
NH = N - dH # DOF for null hypothesis
NK = N - dK # DOF for periodic hypothesis
Dt = _weighted_var(t, dy)
Teff = np.sqrt(4 * np.pi * Dt) # Effective baseline
W = fmax * Teff
Z = np.asarray(Z)
if normalization == "psd":
        # 'psd' normalization is the same as Baluev's z
return W * np.exp(-Z) * np.sqrt(Z)
elif normalization == "standard":
# 'standard' normalization is Z = 2/NH * z_1
return _gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1)) * np.sqrt(0.5 * NH * Z)
elif normalization == "model":
# 'model' normalization is Z = 2/NK * z_2
return _gamma(NK) * W * (1 + Z) ** (-0.5 * NK) * np.sqrt(0.5 * NK * Z)
elif normalization == "log":
# 'log' normalization is Z = 2/NK * z_3
return (
_gamma(NK)
* W
* np.exp(-0.5 * Z * (NK - 0.5))
* np.sqrt(NK * np.sinh(0.5 * Z))
)
else:
raise NotImplementedError(f"normalization={normalization}")
def fap_naive(Z, fmax, t, y, dy, normalization="standard"):
"""False Alarm Probability based on estimated number of indep frequencies"""
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
fap_s = fap_single(Z, N, normalization=normalization)
# result is 1 - (1 - fap_s) ** N_eff
# this is much more precise for small Z / large N
    # Ignore divide by zero in np.log1p - fine to let it return -inf.
with np.errstate(divide="ignore"):
return -np.expm1(N_eff * np.log1p(-fap_s))
def inv_fap_naive(fap, fmax, t, y, dy, normalization="standard"):
"""Inverse FAP based on estimated number of indep frequencies"""
fap = np.asarray(fap)
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
# fap_s = 1 - (1 - fap) ** (1 / N_eff)
    # Ignore divide by zero in np.log - fine to let it return -inf.
with np.errstate(divide="ignore"):
fap_s = -np.expm1(np.log(1 - fap) / N_eff)
return inv_fap_single(fap_s, N, normalization)
def fap_davies(Z, fmax, t, y, dy, normalization="standard"):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau
@vectorize_first_argument
def inv_fap_davies(p, fmax, t, y, dy, normalization="standard"):
"""Inverse of the davies upper-bound"""
from scipy import optimize
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_davies(z, *args) - p
res = optimize.root(func, z0, args=args, method="lm")
if not res.success:
raise ValueError(f"inv_fap_baluev did not converge for p={p}")
return res.x
def fap_baluev(Z, fmax, t, y, dy, normalization="standard"):
"""Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008)
"""
fap_s = fap_single(Z, len(t), normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
# result is 1 - (1 - fap_s) * np.exp(-tau)
# this is much more precise for small numbers
return -np.expm1(-tau) + fap_s * np.exp(-tau)
@vectorize_first_argument
def inv_fap_baluev(p, fmax, t, y, dy, normalization="standard"):
"""Inverse of the Baluev alias-free approximation"""
from scipy import optimize
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_baluev(z, *args) - p
res = optimize.root(func, z0, args=args, method="lm")
if not res.success:
raise ValueError(f"inv_fap_baluev did not converge for p={p}")
return res.x
def _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstrap=1000):
"""Generate a sequence of bootstrap estimates of the max"""
from .core import LombScargle
rng = np.random.default_rng(random_seed)
power_max = []
for _ in range(n_bootstrap):
s = rng.integers(0, len(y), len(y)) # sample with replacement
ls_boot = LombScargle(
t, y[s], dy if dy is None else dy[s], normalization=normalization
)
freq, power = ls_boot.autopower(maximum_frequency=fmax)
power_max.append(power.max())
power_max = u.Quantity(power_max)
power_max.sort()
return power_max
def fap_bootstrap(
Z, fmax, t, y, dy, normalization="standard", n_bootstraps=1000, random_seed=None
):
"""Bootstrap estimate of the false alarm probability"""
pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstraps)
return 1 - np.searchsorted(pmax, Z) / len(pmax)
def inv_fap_bootstrap(
fap, fmax, t, y, dy, normalization="standard", n_bootstraps=1000, random_seed=None
):
"""Bootstrap estimate of the inverse false alarm probability"""
fap = np.asarray(fap)
pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstraps)
return pmax[np.clip(np.floor((1 - fap) * len(pmax)).astype(int), 0, len(pmax) - 1)]
METHODS = {
"single": fap_single,
"naive": fap_naive,
"davies": fap_davies,
"baluev": fap_baluev,
"bootstrap": fap_bootstrap,
}
def false_alarm_probability(
Z, fmax, t, y, dy, normalization="standard", method="baluev", method_kwds=None
):
"""Compute the approximate false alarm probability for periodogram peaks Z
This gives an estimate of the false alarm probability for the largest value
in a periodogram, based on the null hypothesis of non-varying data with
Gaussian noise. The true probability cannot be computed analytically, so
each method available here is an approximation to the true value.
Parameters
----------
Z : array-like
The periodogram value.
fmax : float
The maximum frequency of the periodogram.
t, y, dy : array-like
The data times, values, and errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
The periodogram normalization.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
See Also
--------
false_alarm_level : compute the periodogram level for a particular fap
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
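    A minimal sketch (hypothetical data; the public entry point is
    ``LombScargle.false_alarm_probability``):
    .. code-block:: python
        rng = np.random.default_rng(0)
        t = 100 * rng.random(50)
        y = np.sin(2 * np.pi * t) + rng.standard_normal(50)
        fap = false_alarm_probability(0.5, fmax=1.5, t=t, y=y, dy=None,
                                      normalization="standard", method="naive")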
"""
if method == "single":
return fap_single(Z, len(t), normalization)
elif method not in METHODS:
raise ValueError(f"Unrecognized method: {method}")
method = METHODS[method]
method_kwds = method_kwds or {}
return method(Z, fmax, t, y, dy, normalization, **method_kwds)
INV_METHODS = {
"single": inv_fap_single,
"naive": inv_fap_naive,
"davies": inv_fap_davies,
"baluev": inv_fap_baluev,
"bootstrap": inv_fap_bootstrap,
}
def false_alarm_level(
p, fmax, t, y, dy, normalization, method="baluev", method_kwds=None
):
"""Compute the approximate periodogram level given a false alarm probability
This gives an estimate of the periodogram level corresponding to a specified
false alarm probability for the largest peak, assuming a null hypothesis
of non-varying data with Gaussian noise. The true level cannot be computed
analytically, so each method available here is an approximation to the true
value.
Parameters
----------
p : array-like
The false alarm probability (0 < p < 1).
fmax : float
The maximum frequency of the periodogram.
    t, y, dy : array-like
        The data times, values, and errors.
    normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
z : np.ndarray
The periodogram level.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
See Also
--------
false_alarm_probability : compute the fap for a given periodogram level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
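    A minimal sketch (continuing the hypothetical data above; the public
    entry point is ``LombScargle.false_alarm_level``):
    .. code-block:: python
        z = false_alarm_level(0.01, fmax=1.5, t=t, y=y, dy=None,
                              normalization="standard", method="naive")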
"""
if method == "single":
return inv_fap_single(p, len(t), normalization)
elif method not in INV_METHODS:
raise ValueError(f"Unrecognized method: {method}")
method = INV_METHODS[method]
method_kwds = method_kwds or {}
return method(p, fmax, t, y, dy, normalization, **method_kwds)
|
93239015756ec36e1d684cf3938c60bc61bcdb97bf24cafb97c4f399440e233d | """Main Lomb-Scargle Implementation"""
import numpy as np
from astropy import units
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.base import BasePeriodogram
from . import _statistics
from .implementations import available_methods, lombscargle
from .implementations.mle import design_matrix, periodic_fit
def has_units(obj):
return hasattr(obj, "unit")
def get_unit(obj):
return getattr(obj, "unit", 1)
def strip_units(*arrs):
strip = lambda a: None if a is None else np.asarray(a)
if len(arrs) == 1:
return strip(arrs[0])
else:
return map(strip, arrs)
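# A behavioral sketch of the helpers above (assumed values):
#
#     has_units(1 * u.s)          # -> True
#     strip_units(1 * u.s)        # -> array(1.)
#     t, y = strip_units(t, y)    # several arrays come back as a lazy map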
class LombScargle(BasePeriodogram):
"""Compute the Lomb-Scargle Periodogram.
    The implementations here are based on code presented in [1]_ and [2]_;
if you use this functionality in an academic application, citation of
those works would be appreciated.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
sequence of observation times
y : array-like or `~astropy.units.Quantity`
sequence of observations associated with times t
dy : float, array-like, or `~astropy.units.Quantity`, optional
error or sequence of observational errors associated with times t
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if fit_mean = False
nterms : int, optional
number of terms to use in the Fourier fit
normalization : {'standard', 'model', 'log', 'psd'}, optional
Normalization to use for the periodogram.
Examples
--------
Generate noisy periodic data:
>>> rand = np.random.default_rng(42)
>>> t = 100 * rand.random(100)
>>> y = np.sin(2 * np.pi * t) + rand.standard_normal(100)
Compute the Lomb-Scargle periodogram on an automatically-determined
frequency grid & find the frequency of max power:
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP
1.0007641728995051
Compute the Lomb-Scargle periodogram at a user-specified frequency grid:
>>> freq = np.arange(0.8, 1.3, 0.1)
>>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP
array([0.0792948 , 0.01778874, 0.25328167, 0.01064157, 0.01471387])
If the inputs are astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.s
>>> y = y * u.mag
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency.unit
Unit("1 / s")
>>> power.unit
Unit(dimensionless)
Note here that the Lomb-Scargle power is always a unitless quantity,
because it is related to the :math:`\\chi^2` of the best-fit periodic
model at each frequency.
References
----------
.. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to
astroML: Machine learning for astrophysics*. Proceedings of the
Conference on Intelligent Data Understanding (2012)
.. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical
Time Series*. ApJ 812.1:18 (2015)
"""
available_methods = available_methods()
def __init__(
self,
t,
y,
dy=None,
fit_mean=True,
center_data=True,
nterms=1,
normalization="standard",
):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to("day")
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, Time):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
self.fit_mean = fit_mean
self.center_data = center_data
self.nterms = nterms
self.normalization = normalization
def _validate_inputs(self, t, y, dy):
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if any(has_units(arr) for arr in (t, y, dy)):
t, y = map(units.Quantity, (t, y))
if dy is not None:
dy = units.Quantity(dy)
try:
dy = units.Quantity(dy, unit=y.unit)
except units.UnitConversionError:
raise ValueError("Units of dy not equivalent to units of y")
return t, y, dy
def _validate_frequency(self, frequency):
frequency = np.asanyarray(frequency)
if has_units(self._trel):
frequency = units.Quantity(frequency)
try:
frequency = units.Quantity(frequency, unit=1.0 / self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of frequency not equivalent to units of 1/t")
else:
if has_units(frequency):
raise ValueError("frequency have units while 1/t doesn't.")
return frequency
def _validate_t(self, t):
t = np.asanyarray(t)
if has_units(self._trel):
t = units.Quantity(t)
try:
t = units.Quantity(t, unit=self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of t not equivalent to units of input self.t")
return t
def _power_unit(self, norm):
if has_units(self.y):
if self.dy is None and norm == "psd":
return self.y.unit**2
else:
return units.dimensionless_unscaled
else:
return 1
def autofrequency(
self,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
return_freq_limits=False,
):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
            The heuristically-determined optimal frequency grid
"""
baseline = self._trel.max() - self._trel.min()
n_samples = self._trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (Nf - 1)
else:
return minimum_frequency + df * np.arange(Nf)
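    # Editor's worked example of the heuristic in ``autofrequency`` above:
    # for a 10-day baseline with 100 points and the defaults
    # samples_per_peak=5 and nyquist_factor=5, the spacing is
    # df = 1 / (10 * 5) = 0.02 / day, the minimum frequency is
    # 0.5 * df = 0.01 / day, the maximum is 5 * (0.5 * 100 / 10) = 25 / day,
    # and the grid contains Nf = 1 + round((25 - 0.01) / 0.02) = 1251 points.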
def autopower(
self,
method="auto",
method_kwds=None,
normalization=None,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
frequency = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
)
power = self.power(
frequency,
normalization=normalization,
method=method,
method_kwds=method_kwds,
assume_regular_frequency=True,
)
return frequency, power
def power(
self,
frequency,
normalization=None,
method="auto",
assume_regular_frequency=False,
method_kwds=None,
):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
power = lombscargle(
*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
normalization=normalization,
method=method,
method_kwds=method_kwds,
assume_regular_frequency=assume_regular_frequency,
)
return power * self._power_unit(normalization)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to("day")
if self._tstart is None:
if isinstance(times, Time):
raise TypeError(
f"{name} was provided as an absolute time but "
"the LombScargle class was initialized "
"with relative times."
)
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError(
f"{name} was provided as a relative time but "
"the LombScargle class was initialized "
"with absolute times."
)
return times
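    # Editor's sketch: for a periodogram built from absolute times, e.g.
    # ``ls = LombScargle(Time("2020-01-01") + np.arange(10) * u.day, y)``,
    # later calls such as ``ls.model(t_fit, f)`` route ``t_fit`` through this
    # helper, so ``t_fit`` must then be a ``Time`` instance; plain arrays,
    # quantities, and ``TimeDelta`` values raise a ``TypeError`` here.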
def model(self, t, frequency):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times
(will have length ``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
"""
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time("t", t))
y_fit = periodic_fit(
*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
t_fit=strip_units(t),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
)
return y_fit * get_unit(self.y)
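    # Editor's sketch of typical ``model`` usage, assuming the ``t``/``y``
    # arrays from the class docstring example::
    #
    #     >>> ls = LombScargle(t, y)
    #     >>> frequency, power = ls.autopower()
    #     >>> f_best = frequency[np.argmax(power)]
    #     >>> t_fit = np.linspace(0, 100, 1000)
    #     >>> y_fit = ls.model(t_fit, f_best)  # best-fit curve, length 1000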
def offset(self):
"""Return the offset of the model
The offset of the model is the (weighted) mean of the y values.
Note that if self.center_data is False, the offset is 0 by definition.
Returns
-------
offset : scalar
See Also
--------
design_matrix
model
model_parameters
"""
y, dy = strip_units(self.y, self.dy)
if dy is None:
dy = 1
dy = np.broadcast_to(dy, y.shape)
if self.center_data:
w = dy**-2.0
y_mean = np.dot(y, w) / w.sum()
else:
y_mean = 0
return y_mean * get_unit(self.y)
def model_parameters(self, frequency, units=True):
r"""Compute the best-fit model parameters at the given frequency.
The model described by these parameters is:
.. math::
y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)]
where :math:`\vec{\theta}` is the array of parameters returned by this function.
Parameters
----------
frequency : float
the frequency for the model
units : bool
            If True (default), return the parameters with data units.
Returns
-------
theta : np.ndarray (n_parameters,)
The best-fit model parameters at the given frequency.
See Also
--------
design_matrix
model
offset
"""
frequency = self._validate_frequency(frequency)
t, y, dy = strip_units(self._trel, self.y, self.dy)
if self.center_data:
y = y - strip_units(self.offset())
dy = np.ones_like(y) if dy is None else np.asarray(dy)
X = self.design_matrix(frequency)
parameters = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy))
if units:
parameters = get_unit(self.y) * parameters
return parameters
def design_matrix(self, frequency, t=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
t : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
Returns
-------
X : array
The design matrix for the model at the given frequency.
This should have a shape of (``len(t)``, ``n_parameters``).
See Also
--------
model
model_parameters
offset
"""
if t is None:
t, dy = strip_units(self._trel, self.dy)
else:
t, dy = strip_units(self._validate_t(self._as_relative_time("t", t)), None)
return design_matrix(t, frequency, dy, nterms=self.nterms, bias=self.fit_mean)
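    # Editor's note: ``offset``, ``model_parameters``, and ``design_matrix``
    # together realize the linear model stated in ``model``; for unitless
    # inputs one expects, up to floating-point round-off,
    #
    #     ls.model(t, f) == ls.offset() + ls.design_matrix(f, t) @ ls.model_parameters(f)
    #
    # which makes a handy sanity check when experimenting with custom fits.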
def distribution(self, power, cumulative=False):
"""Expected periodogram distribution under the null hypothesis.
This computes the expected probability distribution or cumulative
probability distribution of periodogram power, under the null
hypothesis of a non-varying signal with Gaussian noise. Note that
this is not the same as the expected distribution of peak values;
for that see the ``false_alarm_probability()`` method.
Parameters
----------
power : array-like
The periodogram power at which to compute the distribution.
cumulative : bool, optional
If True, then return the cumulative distribution.
See Also
--------
false_alarm_probability
false_alarm_level
Returns
-------
dist : np.ndarray
The probability density or cumulative probability associated with
the provided powers.
"""
dH = 1 if self.fit_mean or self.center_data else 0
dK = dH + 2 * self.nterms
dist = _statistics.cdf_single if cumulative else _statistics.pdf_single
return dist(power, len(self._trel), self.normalization, dH=dH, dK=dK)
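    # Editor's note: for the 'standard' normalization the null distribution
    # is supported on [0, 1], so ``distribution(1.0, cumulative=True)``
    # should evaluate to ~1 for a centered, single-term periodogram.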
def false_alarm_probability(
self,
power,
method="baluev",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
method_kwds=None,
):
"""False alarm probability of periodogram maxima under the null hypothesis.
This gives an estimate of the false alarm probability given the height
of the largest peak in the periodogram, based on the null hypothesis
of non-varying data with Gaussian noise.
        Parameters
        ----------
        power : array-like
            The periodogram value.
        method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
            The approximation method to use.
        samples_per_peak : float, optional
            The approximate number of desired samples across the typical peak,
            used when computing the frequency limits.
        nyquist_factor : float, optional
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float, optional
            If specified, use this minimum frequency rather than one chosen
            based on the size of the baseline.
        maximum_frequency : float, optional
            If specified, use this maximum frequency rather than one chosen
            based on the average nyquist frequency.
        method_kwds : dict, optional
            Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError(
"false alarm probability is not implemented for multiterm periodograms."
)
if not (self.fit_mean or self.center_data):
raise NotImplementedError(
"false alarm probability is implemented "
"only for periodograms of centered data."
)
fmin, fmax = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True,
)
return _statistics.false_alarm_probability(
power,
fmax=fmax,
t=self._trel,
y=self.y,
dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds,
)
def false_alarm_level(
self,
false_alarm_probability,
method="baluev",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
method_kwds=None,
):
"""Level of maximum at a given false alarm probability.
This gives an estimate of the periodogram level corresponding to a
specified false alarm probability for the largest peak, assuming a
null hypothesis of non-varying data with Gaussian noise.
        Parameters
        ----------
        false_alarm_probability : array-like
            The false alarm probability (0 < fap < 1).
        method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
            The approximation method to use; default='baluev'.
        samples_per_peak : float, optional
            The approximate number of desired samples across the typical peak,
            used when computing the frequency limits.
        nyquist_factor : float, optional
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float, optional
            If specified, use this minimum frequency rather than one chosen
            based on the size of the baseline.
        maximum_frequency : float, optional
            If specified, use this maximum frequency rather than one chosen
            based on the average nyquist frequency.
        method_kwds : dict, optional
            Additional method-specific keywords.
Returns
-------
power : np.ndarray
The periodogram peak height corresponding to the specified
false alarm probability.
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data. The number of samples can
be set with the method-specific keyword "n_bootstraps" (default=1000).
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_probability
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError(
"false alarm probability is not implemented for multiterm periodograms."
)
if not (self.fit_mean or self.center_data):
raise NotImplementedError(
"false alarm probability is implemented "
"only for periodograms of centered data."
)
fmin, fmax = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True,
)
return _statistics.false_alarm_level(
false_alarm_probability,
fmax=fmax,
t=self._trel,
y=self.y,
dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds,
)
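# Editor's sketch of the false-alarm helpers above, continuing the class
# docstring example (outputs omitted)::
#
#     >>> ls = LombScargle(t, y)
#     >>> frequency, power = ls.autopower()
#     >>> ls.false_alarm_probability(power.max())  # doctest: +SKIP
#     >>> ls.false_alarm_level(0.01)  # doctest: +SKIP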
|
0cd7fc74b671426ea6f43ef3daf080d3228389193cb6537bc1462aec494a92ec | import numpy as np
NORMALIZATIONS = ["standard", "psd", "model", "log"]
def compute_chi2_ref(y, dy=None, center_data=True, fit_mean=True):
"""Compute the reference chi-square for a particular dataset.
    Note: this is not valid for center_data=False and fit_mean=False.
Parameters
----------
y : array-like
data values
dy : float, array, or None, optional
data uncertainties
center_data : bool
specify whether data should be pre-centered
fit_mean : bool
specify whether model should fit the mean of the data
Returns
-------
chi2_ref : float
The reference chi-square for the periodogram of this data
"""
if dy is None:
dy = 1
y, dy = np.broadcast_arrays(y, dy)
w = dy**-2.0
if center_data or fit_mean:
mu = np.dot(w, y) / w.sum()
else:
mu = 0
yw = (y - mu) / dy
return np.dot(yw, yw)
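# Editor's note: with ``dy = 1`` and centering enabled this reduces to
# ``sum((y - mean(y))**2)``, the chi-square of the constant (mean-only)
# reference model against which the periodogram normalizations are defined.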
def convert_normalization(Z, N, from_normalization, to_normalization, chi2_ref=None):
"""Convert power from one normalization to another.
This currently only works for standard & floating-mean models.
Parameters
----------
Z : array-like
the periodogram output
N : int
the number of data points
from_normalization, to_normalization : str
the normalization to convert from and to. Options are
['standard', 'model', 'log', 'psd']
chi2_ref : float
The reference chi-square, required for converting to or from the
psd normalization.
Returns
-------
Z_out : ndarray
The periodogram in the new normalization
"""
Z = np.asarray(Z)
from_to = (from_normalization, to_normalization)
for norm in from_to:
if norm not in NORMALIZATIONS:
raise ValueError(f"{from_normalization} is not a valid normalization")
if from_normalization == to_normalization:
return Z
if "psd" in from_to and chi2_ref is None:
raise ValueError(
"must supply reference chi^2 when converting to or from psd normalization"
)
if from_to == ("log", "standard"):
return 1 - np.exp(-Z)
elif from_to == ("standard", "log"):
return -np.log(1 - Z)
elif from_to == ("log", "model"):
return np.exp(Z) - 1
elif from_to == ("model", "log"):
return np.log(Z + 1)
elif from_to == ("model", "standard"):
return Z / (1 + Z)
elif from_to == ("standard", "model"):
return Z / (1 - Z)
elif from_normalization == "psd":
return convert_normalization(
2 / chi2_ref * Z,
N,
from_normalization="standard",
to_normalization=to_normalization,
)
elif to_normalization == "psd":
Z_standard = convert_normalization(
Z, N, from_normalization=from_normalization, to_normalization="standard"
)
return 0.5 * chi2_ref * Z_standard
else:
raise NotImplementedError(
f"conversion from '{from_normalization}' to '{to_normalization}'"
)
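# Editor's sketch (illustrative only): the pairwise formulas above are
# mutual inverses, so a standard -> log -> standard round trip should
# recover the input exactly.
def _example_normalization_roundtrip():  # pragma: no cover - illustrative only
    Z = np.array([0.1, 0.5, 0.9])
    Z_log = convert_normalization(
        Z, N=100, from_normalization="standard", to_normalization="log"
    )
    Z_back = convert_normalization(
        Z_log, N=100, from_normalization="log", to_normalization="standard"
    )
    assert np.allclose(Z, Z_back)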
|
1530684e80bf7942738fcba0740583f8af3463e59acc7118684d925e3e8676c6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.bls import BoxLeastSquares
from astropy.timeseries.periodograms.lombscargle.core import has_units
def assert_allclose_blsresults(blsresult, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
This method loops over all attributes and compares the values using
:func:`~astropy.tests.helper.assert_quantity_allclose` function.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in blsresult.items():
if k not in other:
raise AssertionError(f"missing key '{k}'")
if k == "objective":
assert (
v == other[k]
), f"Mismatched objectives. Expected '{v}', got '{other[k]}'"
continue
assert_quantity_allclose(v, other[k], **kwargs)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
@pytest.fixture
def data():
t = np.array(
[
6.96469186,
2.86139335,
2.26851454,
5.51314769,
7.1946897,
4.2310646,
9.80764198,
6.84829739,
4.80931901,
3.92117518,
3.43178016,
7.29049707,
4.38572245,
0.59677897,
3.98044255,
7.37995406,
1.8249173,
1.75451756,
5.31551374,
5.31827587,
6.34400959,
8.49431794,
7.24455325,
6.11023511,
7.22443383,
3.22958914,
3.61788656,
2.28263231,
2.93714046,
6.30976124,
0.9210494,
4.33701173,
4.30862763,
4.93685098,
4.2583029,
3.12261223,
4.26351307,
8.93389163,
9.44160018,
5.01836676,
6.23952952,
1.15618395,
3.17285482,
4.14826212,
8.66309158,
2.50455365,
4.83034264,
9.85559786,
5.19485119,
6.12894526,
1.20628666,
8.26340801,
6.03060128,
5.45068006,
3.42763834,
3.04120789,
4.17022211,
6.81300766,
8.75456842,
5.10422337,
6.69313783,
5.85936553,
6.24903502,
6.74689051,
8.42342438,
0.83194988,
7.63682841,
2.43666375,
1.94222961,
5.72456957,
0.95712517,
8.85326826,
6.27248972,
7.23416358,
0.16129207,
5.94431879,
5.56785192,
1.58959644,
1.53070515,
6.95529529,
3.18766426,
6.91970296,
5.5438325,
3.88950574,
9.2513249,
8.41669997,
3.57397567,
0.43591464,
3.04768073,
3.98185682,
7.0495883,
9.95358482,
3.55914866,
7.62547814,
5.93176917,
6.91701799,
1.51127452,
3.98876293,
2.40855898,
3.43456014,
5.13128154,
6.6662455,
1.05908485,
1.30894951,
3.21980606,
6.61564337,
8.46506225,
5.53257345,
8.54452488,
3.84837811,
3.16787897,
3.54264676,
1.71081829,
8.29112635,
3.38670846,
5.52370075,
5.78551468,
5.21533059,
0.02688065,
9.88345419,
9.05341576,
2.07635861,
2.92489413,
5.20010153,
9.01911373,
9.83630885,
2.57542064,
5.64359043,
8.06968684,
3.94370054,
7.31073036,
1.61069014,
6.00698568,
8.65864458,
9.83521609,
0.7936579,
4.28347275,
2.0454286,
4.50636491,
5.47763573,
0.9332671,
2.96860775,
9.2758424,
5.69003731,
4.57411998,
7.53525991,
7.41862152,
0.48579033,
7.08697395,
8.39243348,
1.65937884,
7.80997938,
2.86536617,
3.06469753,
6.65261465,
1.11392172,
6.64872449,
8.87856793,
6.96311268,
4.40327877,
4.38214384,
7.65096095,
5.65642001,
0.84904163,
5.82671088,
8.14843703,
3.37066383,
9.2757658,
7.50717,
5.74063825,
7.51643989,
0.79148961,
8.59389076,
8.21504113,
9.0987166,
1.28631198,
0.81780087,
1.38415573,
3.9937871,
4.24306861,
5.62218379,
1.2224355,
2.01399501,
8.11644348,
4.67987574,
8.07938209,
0.07426379,
5.51592726,
9.31932148,
5.82175459,
2.06095727,
7.17757562,
3.7898585,
6.68383947,
0.29319723,
6.35900359,
0.32197935,
7.44780655,
4.72913002,
1.21754355,
5.42635926,
0.66774443,
6.53364871,
9.96086327,
7.69397337,
5.73774114,
1.02635259,
6.99834075,
6.61167867,
0.49097131,
7.92299302,
5.18716591,
4.25867694,
7.88187174,
4.11569223,
4.81026276,
1.81628843,
3.213189,
8.45532997,
1.86903749,
4.17291061,
9.89034507,
2.36599812,
9.16832333,
9.18397468,
0.91296342,
4.63652725,
5.02216335,
3.1366895,
0.47339537,
2.41685637,
0.95529642,
2.38249906,
8.07791086,
8.94978288,
0.43222892,
3.01946836,
9.80582199,
5.39504823,
6.26309362,
0.05545408,
4.84909443,
9.88328535,
3.75185527,
0.97038159,
4.61908762,
9.63004466,
3.41830614,
7.98922733,
7.98846331,
2.08248297,
4.43367702,
7.15601275,
4.10519785,
1.91006955,
9.67494307,
6.50750366,
8.65459852,
2.52423578e-01,
2.66905815,
5.02071100,
6.74486351e-01,
9.93033261,
2.36462396,
3.74292182,
2.14011915,
1.05445866,
2.32479786,
3.00610136,
6.34442268,
2.81234781,
3.62276761,
5.94284372e-02,
3.65719126,
5.33885982,
1.62015837,
5.97433108,
2.93152469,
6.32050495,
2.61966053e-01,
8.87593460,
1.61186304e-01,
1.26958031,
7.77162462,
4.58952322e-01,
7.10998694,
9.71046141,
8.71682933,
7.10161651,
9.58509743,
4.29813338,
8.72878914,
3.55957668,
9.29763653,
1.48777656,
9.40029015,
8.32716197,
8.46054838,
1.23923010,
5.96486898,
1.63924809e-01,
7.21184366,
7.73751413e-02,
8.48222774e-01,
2.25498410,
8.75124534,
3.63576318,
5.39959935,
5.68103214,
2.25463360,
5.72146768,
6.60951795,
2.98245393,
4.18626859,
4.53088925,
9.32350662,
5.87493747,
9.48252372,
5.56034754,
5.00561421,
3.53221097e-02,
4.80889044,
9.27454999,
1.98365689,
5.20911344e-01,
4.06778893,
3.72396481,
8.57153058,
2.66111156e-01,
9.20149230,
6.80902999,
9.04225994,
6.07529071,
8.11953312,
3.35543874,
3.49566228,
3.89874230,
7.54797082,
3.69291174,
2.42219806,
9.37668357,
9.08011084,
3.48797316,
6.34638070,
2.73842212,
2.06115129,
3.36339529,
3.27099893,
8.82276101,
8.22303815,
7.09623229,
9.59345225,
4.22543353,
2.45033039,
1.17398437,
3.01053358,
1.45263734,
9.21860974e-01,
6.02932197,
3.64187450,
5.64570343,
1.91335721,
6.76905860,
2.15505447,
2.78023594,
7.41760422,
5.59737896,
3.34836413,
5.42988783,
6.93984703,
9.12132121,
5.80713213,
2.32686379,
7.46697631,
7.77769018,
2.00401315,
8.20574220,
4.64934855,
7.79766662,
2.37478220,
3.32580270,
9.53697119,
6.57815073,
7.72877831,
6.88374343,
2.04304118,
4.70688748,
8.08963873,
6.75035127,
6.02788565e-02,
8.74077427e-01,
3.46794720,
9.44365540,
4.91190481,
2.70176267,
3.60423719,
2.10652628,
4.21200057,
2.18035440,
8.45752507,
4.56270599,
2.79802018,
9.32891648,
3.14351354,
9.09714662,
4.34180910e-01,
7.07115060,
4.83889039,
4.44221061,
3.63233444e-01,
4.06831905e-01,
3.32753617,
9.47119540,
6.17659977,
3.68874842,
6.11977039,
2.06131536,
1.65066443,
3.61817266,
8.63353352,
5.09401727,
2.96901516,
9.50251625,
8.15966090,
3.22973943,
9.72098245,
9.87351098,
4.08660134,
6.55923103,
4.05653198,
2.57348106,
8.26526760e-01,
2.63610346,
2.71479854,
3.98639080,
1.84886031,
9.53818403,
1.02879885,
6.25208533,
4.41697388,
4.23518049,
3.71991783,
8.68314710,
2.80476981,
2.05761574e-01,
9.18097016,
8.64480278,
2.76901790,
5.23487548,
1.09088197,
9.34270688e-01,
8.37466108,
4.10265718,
6.61716540,
9.43200558,
2.45130592,
1.31598313e-01,
2.41484058e-01,
7.09385692,
9.24551885,
4.67330273,
3.75109148,
5.42860425,
8.58916838,
6.52153874,
2.32979897,
7.74580205,
1.34613497,
1.65559971,
6.12682283,
2.38783406,
7.04778548,
3.49518527,
2.77423960,
9.98918406,
4.06161246e-01,
6.45822522,
3.86995850e-01,
7.60210258,
2.30089957,
8.98318671e-01,
6.48449712,
7.32601217,
6.78095315,
5.19009471e-01,
2.94306946,
4.51088346,
2.87103290,
8.10513456,
1.31115105,
6.12179362,
9.88214944,
9.02556539,
2.22157062,
8.18876137e-04,
9.80597342,
8.82712985,
9.19472466,
4.15503551,
7.44615462,
]
)
y = np.ones_like(t)
dy = np.array(
[
0.00606416,
0.00696152,
0.00925774,
0.00563806,
0.00946933,
0.00748254,
0.00713048,
0.00652823,
0.00958424,
0.00758812,
0.00902013,
0.00928826,
0.00961191,
0.0065169,
0.00669905,
0.00797537,
0.00720662,
0.00966421,
0.00698782,
0.00738889,
0.00808593,
0.0070237,
0.00996239,
0.00549426,
0.00610302,
0.00661328,
0.00573861,
0.0064211,
0.00889623,
0.00761446,
0.00516977,
0.00991311,
0.00808003,
0.0052947,
0.00830584,
0.00689185,
0.00567837,
0.00781832,
0.0086354,
0.00835563,
0.00623757,
0.00762433,
0.00768832,
0.00858402,
0.00679934,
0.00898866,
0.00813961,
0.00519166,
0.0077324,
0.00930956,
0.00783787,
0.00587914,
0.00755188,
0.00878473,
0.00555053,
0.0090855,
0.00583741,
0.00767038,
0.00692872,
0.00624312,
0.00823716,
0.00518696,
0.00880023,
0.0076347,
0.00937886,
0.00760359,
0.00517517,
0.005718,
0.00897802,
0.00745988,
0.0072094,
0.00659217,
0.00642275,
0.00982943,
0.00716485,
0.00942002,
0.00824082,
0.00929214,
0.00926225,
0.00978156,
0.00848971,
0.00902698,
0.00866564,
0.00802613,
0.00858677,
0.00857875,
0.00520454,
0.00758055,
0.00896326,
0.00621481,
0.00732574,
0.00717493,
0.00701394,
0.0056092,
0.00762856,
0.00723124,
0.00831696,
0.00774707,
0.00513771,
0.00515959,
0.0085068,
0.00853791,
0.0097997,
0.00938352,
0.0073403,
0.00812953,
0.00728591,
0.00611473,
0.00688338,
0.00551942,
0.00833264,
0.00596015,
0.00737734,
0.00983718,
0.00515834,
0.00575865,
0.0064929,
0.00970903,
0.00954421,
0.00581,
0.00990559,
0.00875374,
0.00769989,
0.00965851,
0.00940304,
0.00695658,
0.00828172,
0.00823693,
0.00663484,
0.00589695,
0.00733405,
0.00631641,
0.00677533,
0.00977072,
0.00730569,
0.00842446,
0.00668115,
0.00997931,
0.00829384,
0.00598005,
0.00549092,
0.0097159,
0.00972389,
0.00810664,
0.00508496,
0.00612767,
0.00900638,
0.0093773,
0.00726995,
0.0068276,
0.00637113,
0.00558485,
0.00557872,
0.00976301,
0.00904313,
0.0058239,
0.00603525,
0.00827776,
0.00882332,
0.00905157,
0.00581669,
0.00992064,
0.00613901,
0.00794708,
0.00793808,
0.00983681,
0.00828834,
0.00792452,
0.00759386,
0.00882329,
0.00553028,
0.00501046,
0.00976244,
0.00749329,
0.00664168,
0.00684027,
0.00901922,
0.00691185,
0.00885085,
0.00720231,
0.00922039,
0.00538102,
0.00740564,
0.00733425,
0.00632164,
0.00971807,
0.00952514,
0.00721798,
0.0054858,
0.00603392,
0.00635746,
0.0074211,
0.00669189,
0.00887068,
0.00738013,
0.00935185,
0.00997891,
0.00609918,
0.00805836,
0.00923751,
0.00972618,
0.00645043,
0.00863521,
0.00507508,
0.00939571,
0.00531969,
0.00866698,
0.00997305,
0.00750595,
0.00604667,
0.00797322,
0.00812075,
0.00834036,
0.00586306,
0.00949356,
0.00810496,
0.00521784,
0.00842021,
0.00598042,
0.0051367,
0.00775477,
0.00906657,
0.00929971,
0.0055176,
0.00831521,
0.00855038,
0.00647258,
0.00985682,
0.00639344,
0.00534991,
0.0075964,
0.00847157,
0.0062233,
0.00669291,
0.00781814,
0.00943339,
0.00873663,
0.00604796,
0.00625889,
0.0076194,
0.00884479,
0.00809381,
0.00750662,
0.00798563,
0.0087803,
0.0076854,
0.00948876,
0.00973534,
0.00957677,
0.00877259,
0.00623161,
0.00692636,
0.0064,
0.0082883,
0.00662111,
0.00877196,
0.00556755,
0.00887682,
0.00792951,
0.00917694,
0.00715438,
0.00812482,
0.00777206,
0.00987836,
0.00877737,
0.00772407,
0.00587016,
0.00952057,
0.00602919,
0.00825022,
0.00968236,
0.0061179,
0.00612962,
0.00925909,
0.00913828,
0.00675852,
0.00632548,
0.00563694,
0.00993968,
0.00917672,
0.00949696,
0.0075684,
0.00557192,
0.0052629,
0.00665291,
0.00960165,
0.00973791,
0.00920582,
0.0057934,
0.00709962,
0.00623121,
0.00602675,
0.00842413,
0.00743056,
0.00662455,
0.00550107,
0.00772382,
0.00673513,
0.00695548,
0.00655254,
0.00693598,
0.0077793,
0.00507072,
0.00923823,
0.0096096,
0.00775265,
0.00634011,
0.0099512,
0.00691597,
0.00846828,
0.00844976,
0.00717155,
0.00599579,
0.0098329,
0.00531845,
0.00742575,
0.00610365,
0.00646987,
0.00914264,
0.00683633,
0.00541674,
0.00598155,
0.00930187,
0.00988514,
0.00633991,
0.00837704,
0.00540599,
0.00861733,
0.00708218,
0.0095908,
0.00655768,
0.00970733,
0.00751624,
0.00674446,
0.0082351,
0.00624873,
0.00614882,
0.00598173,
0.0097995,
0.00746457,
0.00875807,
0.00736996,
0.0079377,
0.00792069,
0.00989943,
0.00834217,
0.00619885,
0.00507599,
0.00609341,
0.0072776,
0.0069671,
0.00906163,
0.00892778,
0.00544548,
0.00976005,
0.00763728,
0.00798202,
0.00702528,
0.0082475,
0.00935663,
0.00836968,
0.00985049,
0.00850561,
0.0091086,
0.0052252,
0.00836349,
0.00827376,
0.00550873,
0.00921194,
0.00807086,
0.00549164,
0.00797234,
0.00739208,
0.00616647,
0.00509878,
0.00682784,
0.00809926,
0.0066464,
0.00653627,
0.00875561,
0.00879312,
0.00859383,
0.00550591,
0.00758083,
0.00778899,
0.00872402,
0.00951589,
0.00684519,
0.00714332,
0.00866384,
0.00831318,
0.00778935,
0.0067507,
0.00597676,
0.00591904,
0.00540792,
0.005406,
0.00922899,
0.00691836,
0.0053037,
0.00948213,
0.00611635,
0.00634062,
0.00597249,
0.00983751,
0.0055627,
0.00861082,
0.00966044,
0.00834001,
0.00929363,
0.00621224,
0.00836964,
0.00850436,
0.00729166,
0.00935273,
0.00847193,
0.00947439,
0.00876602,
0.00760145,
0.00749344,
0.00726864,
0.00510823,
0.00767571,
0.00711487,
0.00578767,
0.00559535,
0.00724676,
0.00519957,
0.0099329,
0.0068906,
0.00691055,
0.00525563,
0.00713336,
0.00507873,
0.00515047,
0.0066955,
0.00910484,
0.00729411,
0.0050742,
0.0058161,
0.00869961,
0.00869147,
0.00877261,
0.00675835,
0.00676138,
0.00901038,
0.00699069,
0.00863596,
0.00790562,
0.00682171,
0.00540003,
0.00558063,
0.00944779,
0.0072617,
0.00997002,
0.00681948,
0.00624977,
0.0067527,
0.00671543,
0.00818678,
0.00506369,
0.00881634,
0.00708207,
0.0071612,
0.00740558,
0.00724606,
0.00748735,
0.00672952,
0.00726673,
0.00702326,
0.00759121,
0.00811635,
0.0062052,
0.00754219,
0.00797311,
0.00508474,
0.00760247,
0.00619647,
0.00702269,
0.00913265,
0.00663118,
0.00741608,
0.00512371,
0.00654375,
0.00819861,
0.00657581,
0.00602899,
0.00645328,
0.00977189,
0.00543401,
0.00731679,
0.00529193,
0.00769329,
0.00573018,
0.00817042,
0.00632199,
0.00845458,
0.00673573,
0.00502084,
0.00647447,
]
)
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
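    # Editor's note: the mask below uses the standard phase-folding trick;
    # ((t - t0 + P/2) mod P) - P/2 is the signed distance from each time to
    # its nearest transit center, so comparing its absolute value with half
    # the duration flags the in-transit points.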
m = (
np.abs((t - transit_time + 0.5 * period) % period - 0.5 * period)
< 0.5 * duration
)
y[m] = 1.0 - depth
randn_arr = np.array(
[
-1.00326528e-02,
-8.45644428e-01,
9.11460610e-01,
-1.37449688e00,
-5.47065645e-01,
-7.55266106e-05,
-1.21166803e-01,
-2.00858547e00,
-9.20646543e-01,
1.68234342e-01,
-1.31989156e00,
1.26642930e00,
4.95180889e-01,
-5.14240391e-01,
-2.20292465e-01,
1.86156412e00,
9.35988451e-01,
3.80219145e-01,
-1.41551877e00,
1.62961132e00,
1.05240107e00,
-1.48405388e-01,
-5.49698069e-01,
-1.87903939e-01,
-1.20193668e00,
-4.70785558e-01,
7.63160514e-01,
-1.80762128e00,
-3.14074374e-01,
1.13755973e-01,
1.03568037e-01,
-1.17893695e00,
-1.18215289e00,
1.08916538e00,
-1.22452909e00,
1.00865096e00,
-4.82365315e-01,
1.07979635e00,
-4.21078505e-01,
-1.16647132e00,
8.56554856e-01,
-1.73912222e-02,
1.44857659e00,
8.92200085e-01,
-2.29426629e-01,
-4.49667602e-01,
2.33723433e-02,
1.90210018e-01,
-8.81748527e-01,
8.41939573e-01,
-3.97363492e-01,
-4.23027745e-01,
-5.40688337e-01,
2.31017267e-01,
-6.92052602e-01,
1.34970110e-01,
2.76660307e00,
-5.36094601e-02,
-4.34004738e-01,
-1.66768923e00,
5.02219248e-02,
-1.10923094e00,
-3.75558119e-01,
1.51607594e-01,
-1.73098945e00,
1.57462752e-01,
3.04515175e-01,
-1.29710002e00,
-3.92309192e-01,
-1.83066636e00,
1.57550094e00,
3.30563277e-01,
-1.79588501e-01,
-1.63435831e-01,
1.13144361e00,
-9.41655519e-02,
3.30816771e-01,
1.51862956e00,
-3.46167148e-01,
-1.09263532e00,
-8.24500575e-01,
1.42866383e00,
9.14283085e-02,
-5.02331288e-01,
9.73644380e-01,
9.97957386e-01,
-4.75647768e-01,
-9.71936837e-01,
-1.57052860e00,
-1.79388892e00,
-2.64986452e-01,
-8.93195947e-01,
1.85847441e00,
5.85377547e-02,
-1.94214954e00,
1.41872928e00,
1.61710309e-01,
7.04979480e-01,
6.82034777e-01,
2.96556567e-01,
5.23342630e-01,
2.38760672e-01,
-1.10638591e00,
3.66732198e-01,
1.02390550e00,
-2.10056413e-01,
5.51302218e-01,
4.19589145e-01,
1.81565206e00,
-2.52750301e-01,
-2.92004163e-01,
-1.16931740e-01,
-1.02391075e-01,
-2.27261771e00,
-6.42609841e-01,
2.99885067e-01,
-8.25651467e-03,
-7.99339154e-01,
-6.64779252e-01,
-3.55613128e-01,
-8.01571781e-01,
-5.13050610e-01,
-5.39390119e-01,
8.95370847e-01,
1.01639127e00,
9.33585094e-01,
4.26701799e-01,
-7.08322484e-01,
9.59830450e-01,
-3.14250587e-01,
2.30522083e-02,
1.33822053e00,
8.39928561e-02,
2.47284030e-01,
-1.41277949e00,
4.87009294e-01,
-9.80006647e-01,
1.01193966e00,
-1.84599177e-01,
-2.23616884e00,
-3.58020103e-01,
-2.28034538e-01,
4.85475226e-01,
6.70512391e-01,
-3.27764245e-01,
1.01286819e00,
-3.16705533e00,
-7.13988998e-01,
-1.11236427e00,
-1.25418351e00,
9.59706371e-01,
8.29170399e-01,
-7.75770020e-01,
1.17805700e00,
1.01466892e-01,
-4.21684101e-01,
-6.92922796e-01,
-7.78271726e-01,
4.72774857e-01,
6.50154901e-01,
2.38501212e-01,
-2.05021768e00,
2.96358656e-01,
5.65396564e-01,
-6.69205605e-01,
4.32505429e-02,
-1.86388430e00,
-1.22996906e00,
-3.24235348e-01,
-3.09751144e-01,
3.51679372e-01,
-1.18692539e00,
-3.41206065e-01,
-4.89779780e-01,
5.28010474e-01,
1.42104277e00,
1.72092032e00,
-1.56844005e00,
-4.80141918e-02,
-1.11252931e00,
-6.47449515e-02,
4.22919280e-01,
8.14908987e-02,
-4.90116988e-02,
1.48303917e00,
7.20989392e-01,
-2.72654462e-01,
2.42113609e-02,
8.70897807e-01,
6.09790506e-01,
-4.25076104e-01,
-1.77524284e00,
-1.18465749e00,
1.45979225e-01,
-1.78652685e00,
-1.52394498e-01,
-4.53569176e-01,
9.99252803e-01,
-1.31804382e00,
-1.93176898e00,
-4.19640742e-01,
6.34763132e-01,
1.06991860e00,
-9.09327017e-01,
4.70263748e-01,
-1.11143045e00,
-7.48827466e-01,
5.67594726e-01,
7.18150543e-01,
-9.99380749e-01,
4.74898323e-01,
-1.86849981e00,
-2.02658907e-01,
-1.13424803e00,
-8.07699340e-01,
-1.27607735e00,
5.53626395e-01,
5.53874470e-01,
-6.91200445e-01,
3.75582306e-01,
2.61272553e-01,
-1.28451754e-01,
2.15817020e00,
-8.40878617e-01,
1.43050907e-02,
-3.82387029e-01,
-3.71780015e-01,
1.59412004e-01,
-2.94395700e-01,
-8.60426760e-01,
1.24227498e-01,
1.18233165e00,
9.42766380e-01,
2.03044488e-01,
-7.35396814e-01,
1.86429600e-01,
1.08464302e00,
1.19118926e00,
3.59687060e-01,
-3.64357200e-01,
-2.02752749e-01,
7.72045927e-01,
6.86346215e-01,
-1.75769961e00,
6.58617565e-01,
7.11288340e-01,
-8.87191425e-01,
-7.64981116e-01,
-7.57164098e-01,
-6.80262803e-01,
-1.41674959e00,
3.13091930e-01,
-7.85719399e-01,
-7.03838361e-02,
-4.97568783e-01,
2.55177521e-01,
-1.01061704e00,
2.45265375e-01,
3.89781016e-01,
8.27594585e-01,
1.96776909e00,
-2.09210177e00,
3.20314334e-01,
-7.09162842e-01,
-1.92505867e00,
8.41630623e-01,
1.33219988e00,
-3.91627710e-01,
2.10916296e-01,
-6.40767402e-02,
4.34197668e-01,
8.80535749e-01,
3.44937336e-01,
3.45769929e-01,
1.25973654e00,
-1.64662222e-01,
9.23064571e-01,
-8.22000422e-01,
1.60708495e00,
7.37825392e-01,
-4.03759534e-01,
-2.11454815e00,
-3.10717131e-04,
-1.18180941e00,
2.99634603e-01,
1.45116882e00,
1.60059793e-01,
-1.78012614e-01,
3.42205404e-01,
2.85650196e-01,
-2.36286411e00,
2.40936864e-01,
6.20277356e-01,
-2.59341634e-01,
9.78559078e-01,
-1.27674575e-01,
7.66998762e-01,
2.27310511e00,
-9.63911290e-02,
-1.94213217e00,
-3.36591724e-01,
-1.72589000e00,
6.11237826e-01,
1.30935097e00,
6.95879662e-01,
3.20308213e-01,
-6.44925458e-01,
1.57564975e00,
7.53276212e-01,
2.84469557e-01,
2.04860319e-01,
1.11627359e-01,
4.52216424e-01,
-6.13327179e-01,
1.52524993e00,
1.52339753e-01,
6.00054450e-01,
-4.33567278e-01,
3.74918534e-01,
-2.28175243e00,
-1.11829888e00,
-3.14131532e-02,
-1.32247311e00,
2.43941406e00,
-1.66808131e00,
3.45900749e-01,
1.65577315e00,
4.81287059e-01,
-3.10227553e-01,
-5.52144084e-01,
6.73255489e-01,
-8.00270681e-01,
-1.19486110e-01,
6.91198606e-01,
-3.07879027e-01,
8.75100102e-02,
-3.04086293e-01,
-9.69797604e-01,
1.18915048e00,
1.39306624e00,
-3.16699954e-01,
-2.65576159e-01,
-1.77899339e-01,
5.38803274e-01,
-9.05300265e-01,
-8.85253056e-02,
2.62959055e-01,
6.42042149e-01,
-2.78083727e00,
4.03403210e-01,
3.45846762e-01,
1.00772824e00,
-5.26264015e-01,
-5.18353205e-01,
1.20251659e00,
-1.56315671e00,
1.62909029e00,
2.55589446e00,
4.77451685e-01,
8.14098474e-01,
-1.48958171e00,
-6.94559787e-01,
1.05786255e00,
3.61815347e-01,
-1.81427463e-01,
2.32869132e-01,
5.06976484e-01,
-2.93095701e-01,
-2.89459450e-02,
-3.63073748e-02,
-1.05227898e00,
3.23594628e-01,
1.80358591e00,
1.73196213e00,
-1.47639930e00,
5.70631220e-01,
6.75503781e-01,
-4.10510463e-01,
-9.64200035e-01,
-1.32081431e00,
-4.44703779e-01,
3.50009137e-01,
-1.58058176e-01,
-6.10933088e-01,
-1.24915663e00,
3.50716258e-01,
1.06654245e00,
-9.26921972e-01,
4.48428964e-01,
-1.87947524e00,
-6.57466109e-01,
7.29604120e-01,
-1.11776721e00,
-6.04436725e-01,
1.41796683e00,
-7.32843980e-01,
-8.53944819e-01,
5.75848362e-01,
1.95473356e00,
-2.39669947e-01,
7.68735860e-01,
1.34576918e00,
3.25552163e-01,
-2.69917901e-01,
-8.76326739e-01,
-1.42521096e00,
1.11170175e00,
1.80957146e-01,
1.33280094e00,
9.88925316e-01,
-6.16970520e-01,
-1.18688670e00,
4.12669583e-01,
-6.32506884e-01,
3.76689141e-01,
-7.31151938e-01,
-8.61225253e-01,
-1.40990810e-01,
9.34100620e-01,
3.06539895e-01,
1.17837515e00,
-1.23356170e00,
-1.05707714e00,
-8.91636992e-02,
2.16570138e00,
6.74286114e-01,
-1.06661274e00,
-7.61404530e-02,
2.20714791e-01,
-5.68685746e-01,
6.13274991e-01,
-1.56446138e-01,
-2.99330718e-01,
1.26025679e00,
-1.70966090e00,
-9.61805342e-01,
-8.17308981e-01,
-8.47681070e-01,
-7.28753045e-01,
4.88475958e-01,
1.09653283e00,
9.16041261e-01,
-1.01956213e00,
-1.07417899e-01,
4.52265213e-01,
2.40002952e-01,
1.30574740e00,
-6.75334236e-01,
1.56319421e-01,
-3.93230715e-01,
2.51075019e-01,
-1.07889691e00,
-9.28937721e-01,
-7.30110860e-01,
-5.63669311e-01,
1.54792327e00,
1.17540191e00,
-2.12649671e-01,
1.72933294e-01,
-1.59443602e00,
-1.79292347e-01,
1.59614713e-01,
1.14568421e00,
3.26804720e-01,
4.32890059e-01,
2.97762890e-01,
2.69001190e-01,
-1.39675918e00,
-4.16757668e-01,
1.43488680e00,
8.23896443e-01,
4.94234499e-01,
6.67153092e-02,
6.59441396e-01,
-9.44889409e-01,
-1.58005956e00,
-3.82086552e-01,
5.37923058e-01,
1.07829882e-01,
1.01395868e00,
3.51450517e-01,
4.48421962e-02,
1.32748495e00,
1.13237578e00,
-9.80913012e-02,
-1.10304986e00,
-9.07361492e-01,
-1.61451138e-01,
-3.66811384e-01,
1.65776233e00,
-1.68013415e00,
-6.42577869e-02,
-1.06622649e00,
1.16801869e-01,
3.82264833e-01,
-4.04896974e-01,
5.30481414e-01,
-1.98626941e-01,
-1.79395613e-01,
-4.17888725e-01,
]
)
y += dy * randn_arr
return (
t,
y,
dy,
dict(period=period, transit_time=transit_time, duration=duration, depth=depth),
)
def test_32bit_bug():
rand = np.random.default_rng(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
y[np.abs((t + 1.0) % 2.0 - 1) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.standard_normal(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert_allclose(results.period[np.argmax(results.power)], 2.000412388152837)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert_allclose(
results.power,
[0.01723948, 0.0643028, 0.1338783, 0.09428816, 0.03577543],
rtol=1.1e-7,
)
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(
np.linspace(
np.log(params["period"]) - 0.1, np.log(params["period"]) + 0.1, 1000
)
)
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind] - params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(
np.linspace(np.log(params["period"]) - 1, np.log(params["period"]) + 1, 10)
)
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
assert_allclose_blsresults(
results, model.power(periods, durations, method="slow", objective=objective)
)
def test_input_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * u.one)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * u.one, dy * y_unit)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t * t_unit, y * u.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t * t_unit, y * y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * u.hour)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
model.autoperiod(params["duration"] * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5 * u.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5 * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5, maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5, minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, u.day])
@pytest.mark.parametrize("y_unit", [None, u.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"] - 1.0, params["period"] + 1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(
periods, params["duration"], method=method, objective=objective
)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == u.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == u.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == u.one
assert results.power.unit == u.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
dt = np.abs((t - params["transit_time"] + 0.5 * p) % p - 0.5 * p)
m_in = dt < 0.5 * params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
w = np.linalg.solve(np.dot(A.T, A / dy[:, None] ** 2), np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
model_true = model_true * u.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"], params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"] - 1, params["period"] + 1, period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = u.mag
t = t * u.day
y = y * u.mag
dy = dy * u.mag
params["period"] = params["period"] * u.day
params["duration"] = params["duration"] * u.day
params["transit_time"] = params["transit_time"] * u.day
params["depth"] = params["depth"] * u.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"], oversample=1000)
stats = model.compute_stats(
params["period"], params["duration"], params["transit_time"]
)
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], [9, 7, 7, 7, 8])
assert_quantity_allclose(
np.sum(stats["per_transit_log_likelihood"]), results["log_likelihood"]
)
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(
0.5 * params["period"], params["duration"], oversample=1000
)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(
stats["harmonic_amplitude"], 0.029945029964964204 * y_unit
)
assert_quantity_allclose(
stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit,
)
return
assert_quantity_allclose(stats["harmonic_amplitude"], 0.033027988742275853 * y_unit)
assert_quantity_allclose(
stats["harmonic_delta_log_likelihood"], -12407.505922833765
)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip(
(1.0, 1.0, 1.0, 0.0), ("depth", "depth_even", "depth_odd", "depth_phased")
):
res = np.abs((stats[k][0] - f * params["depth"]) / stats[k][1])
assert res < 1, f"f={f}, k={k}, res={res}"
def test_negative_times(data):
t, y, dy, params = data
mu = np.mean(t)
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model1 = BoxLeastSquares(t, y, dy)
results1 = model1.autopower(duration)
# Compute the periodogram with offset (negative) times
model2 = BoxLeastSquares(t - mu, y, dy)
results2 = model2.autopower(duration)
# Shift the transit times back into the unshifted coordinates
results2.transit_time = (results2.transit_time + mu) % results2.period
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("timedelta", [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy, params = data
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same.
start = Time("2019-05-04T12:34:56")
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of BoxLeastSquares, one with absolute and one
# with relative times.
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(trel, y, dy)
results1 = bls1.autopower(0.16 * u.day)
results2 = bls2.autopower(0.16 * u.day)
# All the results should match except transit time which should be
# absolute instead of relative in the first case.
for key in results1:
if key == "transit_time":
assert_quantity_allclose((results1[key] - start).to(u.day), results2[key])
elif key == "objective":
assert results1[key] == results2[key]
else:
assert_allclose(results1[key], results2[key])
# Check that model evaluation works fine
model1 = bls1.model(t, 0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
model2 = bls2.model(trel, 0.2 * u.day, 0.05 * u.day, TimeDelta(1 * u.day))
assert_quantity_allclose(model1, model2)
# Check model validation
MESSAGE = (
r"{} was provided as {} time but the BoxLeastSquares class was initialized with"
r" {} times\."
)
with pytest.raises(
TypeError, match=MESSAGE.format("transit_time", "a relative", "absolute")
):
bls1.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
with pytest.raises(
TypeError, match=MESSAGE.format("t_model", "a relative", "absolute")
):
bls1.model(trel, 0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
with pytest.raises(
TypeError, match=MESSAGE.format("transit_time", "an absolute", "relative")
):
bls2.model(trel, 0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
with pytest.raises(
TypeError, match=MESSAGE.format("t_model", "an absolute", "relative")
):
bls2.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
# Check compute_stats
stats1 = bls1.compute_stats(0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
stats2 = bls2.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
for key in stats1:
if key == "transit_times":
assert_quantity_allclose(
(stats1[key] - start).to(u.day), stats2[key], atol=1e-10 * u.day
)
elif key.startswith("depth"):
for value1, value2 in zip(stats1[key], stats2[key]):
assert_quantity_allclose(value1, value2)
else:
assert_allclose(stats1[key], stats2[key])
# Check compute_stats validation
MESSAGE = (
r"{} was provided as {} time but the BoxLeastSquares class was"
r" initialized with {} times\."
)
with pytest.raises(
TypeError, match=MESSAGE.format("transit_time", "a relative", "absolute")
):
bls1.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
with pytest.raises(
TypeError, match=MESSAGE.format("transit_time", "an absolute", "relative")
):
bls2.compute_stats(0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
# Check transit_mask
mask1 = bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
mask2 = bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert_equal(mask1, mask2)
# Check transit_mask validation
with pytest.raises(
TypeError, match=MESSAGE.format("transit_time", "a relative", "absolute")
):
bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
with pytest.raises(TypeError, match=MESSAGE.format("t", "a relative", "absolute")):
bls1.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
with pytest.raises(
TypeError, match=MESSAGE.format("transit_time", "an absolute", "relative")
):
bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time("2019-06-04T12:34:56"))
with pytest.raises(TypeError, match=MESSAGE.format("t", "an absolute", "relative")):
bls2.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
def test_transit_time_in_range(data):
t, y, dy, params = data
t_ref = 10230.0
t2 = t + t_ref
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(t2, y, dy)
results1 = bls1.autopower(0.16)
results2 = bls2.autopower(0.16)
assert np.allclose(results1.transit_time, results2.transit_time - t_ref)
assert np.all(results1.transit_time >= t.min())
assert np.all(results1.transit_time <= t.max())
assert np.all(results2.transit_time >= t2.min())
assert np.all(results2.transit_time <= t2.max())
|
1a626db0ba5d3ef2c28310ab99090e7391922732bbee1e0fa7ee0cf856999178 | import numpy as np
from .mle import design_matrix
def lombscargle_chi2(
t,
y,
dy,
frequency,
normalization="standard",
fit_mean=True,
center_data=True,
nterms=1,
):
"""Lomb-Scargle Periodogram
This implements a chi-squared-based periodogram, which is relatively slow
but useful for validating the faster algorithms in the package.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
nterms : int, optional
Number of Fourier terms in the fit
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
"""
if dy is None:
dy = 1
t, y, dy = np.broadcast_arrays(t, y, dy)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
w = dy**-2.0
w /= w.sum()
# if fit_mean is true, centering the data now simplifies the math below.
if center_data or fit_mean:
yw = (y - np.dot(w, y)) / dy
else:
yw = y / dy
chi2_ref = np.dot(yw, yw)
# compute the unnormalized model chi2 at each frequency
def compute_power(f):
X = design_matrix(t, f, dy=dy, bias=fit_mean, nterms=nterms)
XTX = np.dot(X.T, X)
XTy = np.dot(X.T, yw)
return np.dot(XTy.T, np.linalg.solve(XTX, XTy))
p = np.array([compute_power(f) for f in frequency])
if normalization == "psd":
p *= 0.5
elif normalization == "model":
p /= chi2_ref - p
elif normalization == "log":
p = -np.log(1 - p / chi2_ref)
elif normalization == "standard":
p /= chi2_ref
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
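# A minimal usage sketch (editor's illustration, not part of the module):
# evaluate the chi2 periodogram on synthetic data and locate the injected
# frequency. All names used here are defined or imported above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 10 * rng.random(100)
    y = np.sin(2 * np.pi * 1.5 * t) + 0.1 * rng.standard_normal(100)
    freq = np.linspace(0.5, 2.5, 200)
    power = lombscargle_chi2(t, y, dy=0.1, frequency=freq)
    print("peak frequency:", freq[np.argmax(power)])  # expected near 1.5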
|
197953484a1a5dcc4ba5ad0e973da8137073dce587196fec2d22e527612cf49a | """
Main Lomb-Scargle Implementation
The ``lombscargle`` function here is essentially a sophisticated switch
statement for the various implementations available in this submodule
"""
__all__ = ["lombscargle", "available_methods"]
import numpy as np
from .chi2_impl import lombscargle_chi2
from .cython_impl import lombscargle_cython
from .fast_impl import lombscargle_fast
from .fastchi2_impl import lombscargle_fastchi2
from .scipy_impl import lombscargle_scipy
from .slow_impl import lombscargle_slow
METHODS = {
"slow": lombscargle_slow,
"fast": lombscargle_fast,
"chi2": lombscargle_chi2,
"scipy": lombscargle_scipy,
"fastchi2": lombscargle_fastchi2,
"cython": lombscargle_cython,
}
def available_methods():
methods = ["auto", "slow", "chi2", "cython", "fast", "fastchi2"]
# Scipy required for scipy algorithm (obviously)
try:
import scipy # noqa: F401
except ImportError:
pass
else:
methods.append("scipy")
return methods
def _is_regular(frequency):
frequency = np.asarray(frequency)
if frequency.ndim != 1:
return False
elif len(frequency) == 1:
return True
else:
diff = np.diff(frequency)
return np.allclose(diff[0], diff)
def _get_frequency_grid(frequency, assume_regular_frequency=False):
"""Utility to get grid parameters from a frequency array
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
input frequency grid
assume_regular_frequency : bool (default = False)
if True, then do not check whether frequency is a regular grid
Returns
-------
f0, df, N : scalar
Parameters such that all(frequency == f0 + df * np.arange(N))
"""
frequency = np.asarray(frequency)
if frequency.ndim != 1:
raise ValueError("frequency grid must be 1 dimensional")
elif len(frequency) == 1:
return frequency[0], frequency[0], 1
elif not (assume_regular_frequency or _is_regular(frequency)):
raise ValueError("frequency must be a regular grid")
return frequency[0], frequency[1] - frequency[0], len(frequency)
def validate_method(method, dy, fit_mean, nterms, frequency, assume_regular_frequency):
"""
Validate the method argument, and if method='auto'
choose the appropriate method
"""
methods = available_methods()
prefer_fast = len(frequency) > 200 and (
assume_regular_frequency or _is_regular(frequency)
)
prefer_scipy = "scipy" in methods and dy is None and not fit_mean
# automatically choose the appropriate method
if method == "auto":
if nterms != 1:
if prefer_fast:
method = "fastchi2"
else:
method = "chi2"
elif prefer_fast:
method = "fast"
elif prefer_scipy:
method = "scipy"
else:
method = "cython"
if method not in METHODS:
raise ValueError(f"invalid method: {method}")
return method
def lombscargle(
t,
y,
dy=None,
frequency=None,
method="auto",
assume_regular_frequency=False,
normalization="standard",
fit_mean=True,
center_data=True,
method_kwds=None,
nterms=1,
):
"""
Compute the Lomb-Scargle periodogram with a given method.
Parameters
----------
t : array-like
sequence of observation times
y : array-like
sequence of observations associated with times t
dy : float or array-like, optional
error or sequence of observational errors associated with times t
frequency : array-like
frequencies (not angular frequencies) at which to evaluate the
periodogram. This argument is required here; to have a suitable grid
chosen automatically, use the higher-level ``LombScargle.autopower``
interface instead. Note that in order to use method='fast',
frequencies must be regularly spaced.
method : str, optional
specify the Lomb-Scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard' or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
nterms : int, optional
number of Fourier terms to use in the periodogram.
Not supported with every method.
Returns
-------
PLS : array-like
Lomb-Scargle power associated with each frequency omega
"""
# frequencies should be one-dimensional arrays
if frequency is None:
    raise ValueError("frequency must be specified for lombscargle()")
output_shape = frequency.shape
frequency = frequency.ravel()
# we'll need to adjust args and kwds for each method
args = (t, y, dy)
kwds = dict(
frequency=frequency,
center_data=center_data,
fit_mean=fit_mean,
normalization=normalization,
nterms=nterms,
**(method_kwds or {}),
)
method = validate_method(
method,
dy=dy,
fit_mean=fit_mean,
nterms=nterms,
frequency=frequency,
assume_regular_frequency=assume_regular_frequency,
)
# scipy doesn't support dy or fit_mean=True
if method == "scipy":
if kwds.pop("fit_mean"):
raise ValueError("scipy method does not support fit_mean=True")
if dy is not None:
dy = np.ravel(np.asarray(dy))
if not np.allclose(dy[0], dy):
raise ValueError("scipy method only supports uniform uncertainties dy")
args = (t, y)
# fast methods require frequency expressed as a grid
if method.startswith("fast"):
f0, df, Nf = _get_frequency_grid(
kwds.pop("frequency"), assume_regular_frequency
)
kwds.update(f0=f0, df=df, Nf=Nf)
# only chi2 methods support nterms
if not method.endswith("chi2"):
if kwds.pop("nterms") != 1:
raise ValueError(
"nterms != 1 only supported with 'chi2' or 'fastchi2' methods"
)
PLS = METHODS[method](*args, **kwds)
return PLS.reshape(output_shape)
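# A minimal usage sketch (editor's illustration): the 'auto' dispatcher picks
# 'fast' here because the grid is regular and has more than 200 frequencies.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 30 * rng.random(200)
    y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(200)
    freq = 0.1 + 0.01 * np.arange(300)  # regular grid
    power = lombscargle(t, y, dy=0.1, frequency=freq)
    print("available methods:", available_methods())
    print("peak frequency:", freq[np.argmax(power)])  # expected near 1.0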
|
b5d7ba97f1f072640da9519356933e3e7e9d7ebb2e8bb23fdfd4b1cbd1484553 | """Various implementations of the Lomb-Scargle Periodogram"""
from .chi2_impl import lombscargle_chi2
from .fast_impl import lombscargle_fast
from .fastchi2_impl import lombscargle_fastchi2
from .main import available_methods, lombscargle
from .scipy_impl import lombscargle_scipy
from .slow_impl import lombscargle_slow
|
c381fc947ab952fb2c509401c23fa371c97f7b646e899c0a70fac917df67c258 | import numpy as np
from .utils import trig_sum
def lombscargle_fast(
t,
y,
dy,
f0,
df,
Nf,
center_data=True,
fit_mean=True,
normalization="standard",
use_fft=True,
trig_sum_kwds=None,
):
"""Fast Lomb-Scargle Periodogram
This implements the Press & Rybicki method [1]_ for fast O[N log(N)]
Lomb-Scargle periodograms.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_mean : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
trig_sum_kwds : dict or None, optional
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Returns
-------
power : ndarray
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
of unevenly sampled data". ApJ 338:277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipes in C (2002)
"""
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy**-2.0
w /= w.sum()
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_mean:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# ----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_mean:
S, C = trig_sum(t, w, **kwargs)
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
else:
tan_2omega_tau = S2 / C2
# This is what we're computing below; the straightforward way is slower
# and less stable, so we use trig identities instead
#
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
# ----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y**2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_mean:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
power = YC * YC / CC + YS * YS / SS
if normalization == "standard":
power /= YY
elif normalization == "model":
power /= YY - power
elif normalization == "log":
power = -np.log(1 - power / YY)
elif normalization == "psd":
power *= 0.5 * (dy**-2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return power
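# A minimal sketch (editor's illustration): the FFT-based approximation
# should agree closely with the exact O[N^2] evaluation on the same grid.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 20 * rng.random(150)
    y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(150)
    p_fft = lombscargle_fast(t, y, 0.1, f0=0.1, df=0.01, Nf=300, use_fft=True)
    p_direct = lombscargle_fast(t, y, 0.1, f0=0.1, df=0.01, Nf=300, use_fft=False)
    print("max abs difference:", np.max(np.abs(p_fft - p_direct)))  # small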
|
83d0d9853f9ccd36f6a73862baf642b179c786e6a817164e66f5a8484e6f0812 | import numpy as np
def lombscargle_scipy(t, y, frequency, normalization="standard", center_data=True):
"""Lomb-Scargle Periodogram
This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
Lomb-Scargle periodogram. This is a relatively fast version of the naive
O[N^2] algorithm, but cannot handle heteroskedastic errors.
Parameters
----------
t, y : array-like
times and values of the data points. These should be
broadcastable to the same shape. Neither should be `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data.
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
"""
try:
from scipy import signal
except ImportError:
raise ImportError("scipy must be installed to use lombscargle_scipy")
t, y = np.broadcast_arrays(t, y)
# Scipy requires floating-point input
t = np.asarray(t, dtype=float)
y = np.asarray(y, dtype=float)
frequency = np.asarray(frequency, dtype=float)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
if center_data:
y = y - y.mean()
# Note: scipy input accepts angular frequencies
p = signal.lombscargle(t, y, 2 * np.pi * frequency)
if normalization == "psd":
pass
elif normalization == "standard":
p *= 2 / (t.size * np.mean(y**2))
elif normalization == "log":
p = -np.log(1 - 2 * p / (t.size * np.mean(y**2)))
elif normalization == "model":
p /= 0.5 * t.size * np.mean(y**2) - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
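# A minimal usage sketch (editor's illustration; requires scipy). This wrapper
# handles only homoskedastic data, so no dy argument is accepted.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 10 * rng.random(100)
    y = np.sin(2 * np.pi * 1.5 * t) + 0.1 * rng.standard_normal(100)
    freq = np.linspace(0.5, 2.5, 200)
    power = lombscargle_scipy(t, y, freq)
    print("peak frequency:", freq[np.argmax(power)])  # expected near 1.5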
|
6c689d5172d1a426bda2df7ce14a597025838796933935ddebd0754fa7fafe37 | from math import factorial
import numpy as np
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for any positive integer N.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
return 1 << int(N - 1).bit_length()
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array-like
array of abscissas
y : array-like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Examples
--------
>>> rng = np.random.default_rng(0)
>>> x = 100 * rng.random(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
# Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = x % 1 == 0
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums::
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array-like
array of input times
h : array-like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float, optional
The low frequency to use
freq_factor : float, optional
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarray
summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
"""
df *= freq_factor
f0 *= freq_factor
if df <= 0:
raise ValueError("df must be positive")
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
if Mfft <= 0:
raise ValueError("Mfft must be positive")
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)[:N]
if t0 != 0:
f = f0 + df * np.arange(N)
fftgrid *= np.exp(2j * np.pi * t0 * f)
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
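# A minimal sketch (editor's illustration): check that extirpolation preserves
# weighted sums against smooth functions, and that the FFT-based trig_sum
# agrees with the direct evaluation.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    print("bitceil(100) =", bitceil(100))  # 128
    x = 100 * rng.random(50)
    y = np.sin(x)
    y_hat = extirpolate(x, y, N=128, M=4)
    x_hat = np.arange(len(y_hat))
    f = lambda z: np.sin(z / 10)
    print("sums agree:", np.allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat),
                                     rtol=1e-4))
    t = 10 * rng.random(50)
    h = np.sin(t)
    S1, C1 = trig_sum(t, h, df=0.1, N=500, use_fft=True, oversampling=10)
    S2, C2 = trig_sum(t, h, df=0.1, N=500, use_fft=False)
    print("max diffs:", np.max(np.abs(S1 - S2)), np.max(np.abs(C1 - C2)))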
|
62f3ef0efe3964697a3d70c1c676f86a9f3d3e28bda7e62b22ef9faef0df1a99 | import numpy as np
def lombscargle_slow(
t, y, dy, frequency, normalization="standard", fit_mean=True, center_data=True
):
"""Lomb-Scargle Periodogram
This is a pure-python implementation of the original Lomb-Scargle formalism
(e.g. [1]_, [2]_), with the addition of the floating mean (e.g. [3]_)
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] W. Press et al, Numerical Recipes in C (2002)
.. [2] Scargle, J.D. 1982, ApJ 263:835-853
.. [3] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
"""
if dy is None:
dy = 1
t, y, dy = np.broadcast_arrays(t, y, dy)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
w = dy**-2.0
w /= w.sum()
# if fit_mean is true, centering the data now simplifies the math below.
if fit_mean or center_data:
y = y - np.dot(w, y)
omega = 2 * np.pi * frequency
omega = omega.ravel()[np.newaxis, :]
# make following arrays into column vectors
t, y, dy, w = map(lambda x: x[:, np.newaxis], (t, y, dy, w))
sin_omega_t = np.sin(omega * t)
cos_omega_t = np.cos(omega * t)
# compute time-shift tau
# S2 = np.dot(w.T, np.sin(2 * omega * t)
S2 = 2 * np.dot(w.T, sin_omega_t * cos_omega_t)
# C2 = np.dot(w.T, np.cos(2 * omega * t)
C2 = 2 * np.dot(w.T, 0.5 - sin_omega_t**2)
if fit_mean:
S = np.dot(w.T, sin_omega_t)
C = np.dot(w.T, cos_omega_t)
S2 -= 2 * S * C
C2 -= C * C - S * S
# compute components needed for the fit
omega_t_tau = omega * t - 0.5 * np.arctan2(S2, C2)
sin_omega_t_tau = np.sin(omega_t_tau)
cos_omega_t_tau = np.cos(omega_t_tau)
Y = np.dot(w.T, y)
wy = w * y
YCtau = np.dot(wy.T, cos_omega_t_tau)
YStau = np.dot(wy.T, sin_omega_t_tau)
CCtau = np.dot(w.T, cos_omega_t_tau * cos_omega_t_tau)
SStau = np.dot(w.T, sin_omega_t_tau * sin_omega_t_tau)
if fit_mean:
Ctau = np.dot(w.T, cos_omega_t_tau)
Stau = np.dot(w.T, sin_omega_t_tau)
YCtau -= Y * Ctau
YStau -= Y * Stau
CCtau -= Ctau * Ctau
SStau -= Stau * Stau
p = YCtau * YCtau / CCtau + YStau * YStau / SStau
YY = np.dot(w.T, y * y)
if normalization == "standard":
p /= YY
elif normalization == "model":
p /= YY - p
elif normalization == "log":
p = -np.log(1 - p / YY)
elif normalization == "psd":
p *= 0.5 * (dy**-2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p.ravel()
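# A minimal usage sketch (editor's illustration): the pure-python reference
# implementation, useful for validating the faster methods.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 10 * rng.random(100)
    y = np.sin(2 * np.pi * 1.5 * t) + 0.1 * rng.standard_normal(100)
    freq = np.linspace(0.5, 2.5, 200)
    power = lombscargle_slow(t, y, dy=0.1, frequency=freq)
    print("peak frequency:", freq[np.argmax(power)])  # expected near 1.5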
|
79b100b7c201b9cfd6d393545d3cd12750b0e4adca35617cf62315ab3ebd21b1 | import numpy as np
from .utils import trig_sum
def lombscargle_fastchi2(
t,
y,
dy,
f0,
df,
Nf,
normalization="standard",
fit_mean=True,
center_data=True,
nterms=1,
use_fft=True,
trig_sum_kwds=None,
):
"""Lomb-Scargle Periodogram
This implements a fast chi-squared periodogram using the algorithm
outlined in [4]_. For ``nterms == 1`` the result is identical to the standard
Lomb-Scargle periodogram. The advantage of this algorithm is the
ability to compute multiterm periodograms relatively quickly.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
nterms : int, optional
Number of Fourier terms in the fit
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. ApJ 263:835-853 (1982)
.. [4] Palmer, J. ApJ 695:496-502 (2009)
"""
if nterms == 0 and not fit_mean:
raise ValueError("Cannot have nterms = 0 without fitting bias")
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy**-2.0
ws = np.sum(w)
# if fit_mean is true, centering the data now simplifies the math below.
if center_data or fit_mean:
y = y - np.dot(w, y) / ws
yw = y / dy
chi2_ref = np.dot(yw, yw)
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# Here we build-up the matrices XTX and XTy using pre-computed
# sums. The relevant identities are
# 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x
# 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x
# 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x
yws = np.sum(y * w)
SCw = [(np.zeros(Nf), ws * np.ones(Nf))]
SCw.extend(
[trig_sum(t, w, freq_factor=i, **kwargs) for i in range(1, 2 * nterms + 1)]
)
Sw, Cw = zip(*SCw)
SCyw = [(np.zeros(Nf), yws * np.ones(Nf))]
SCyw.extend(
[trig_sum(t, w * y, freq_factor=i, **kwargs) for i in range(1, nterms + 1)]
)
Syw, Cyw = zip(*SCyw)
# Now create an indexing scheme so we can quickly
# build-up matrices at each frequency
order = [("C", 0)] if fit_mean else []
order.extend(sum(([("S", i), ("C", i)] for i in range(1, nterms + 1)), []))
funcs = dict(
S=lambda m, i: Syw[m][i],
C=lambda m, i: Cyw[m][i],
SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]),
CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]),
SC=lambda m, n, i: 0.5 * (np.sign(m - n) * Sw[abs(m - n)][i] + Sw[m + n][i]),
CS=lambda m, n, i: 0.5 * (np.sign(n - m) * Sw[abs(n - m)][i] + Sw[n + m][i]),
)
def compute_power(i):
XTX = np.array(
[[funcs[A[0] + B[0]](A[1], B[1], i) for A in order] for B in order]
)
XTy = np.array([funcs[A[0]](A[1], i) for A in order])
return np.dot(XTy.T, np.linalg.solve(XTX, XTy))
p = np.array([compute_power(i) for i in range(Nf)])
if normalization == "psd":
p *= 0.5
elif normalization == "standard":
p /= chi2_ref
elif normalization == "log":
p = -np.log(1 - p / chi2_ref)
elif normalization == "model":
p /= chi2_ref - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
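# A minimal sketch (editor's illustration): a two-term (nterms=2) periodogram
# picks up a signal with a strong first harmonic at a single base frequency.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 20 * rng.random(150)
    y = np.sin(2 * np.pi * t) + 0.4 * np.sin(4 * np.pi * t)
    y += 0.1 * rng.standard_normal(150)
    p = lombscargle_fastchi2(t, y, None, f0=0.1, df=0.01, Nf=300, nterms=2)
    f = 0.1 + 0.01 * np.arange(300)
    print("peak frequency:", f[np.argmax(p)])  # expected near 1.0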
|
e25abff5cdc4d9f544fe29e89d348dfe93eb6e6f64ebb9f3fa0e18e97fe2e51d | import numpy as np
def design_matrix(t, frequency, dy=None, bias=True, nterms=1):
"""Compute the Lomb-Scargle design matrix at the given frequency
This is the matrix X such that the periodic model at the given frequency
can be expressed :math:`\\hat{y} = X \\theta`.
Parameters
----------
t : array-like, shape=(n_times,)
times at which to compute the design matrix
frequency : float
frequency for the design matrix
dy : float or array-like, optional
data uncertainties: should be broadcastable with `t`
bias : bool (default=True)
If true, include a bias column in the matrix
nterms : int (default=1)
Number of Fourier terms to include in the model
Returns
-------
X : ndarray, shape=(n_times, n_parameters)
The design matrix, where n_parameters = bool(bias) + 2 * nterms
"""
t = np.asarray(t)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency must be a scalar")
if nterms == 0 and not bias:
raise ValueError("cannot have nterms=0 and no bias")
if bias:
cols = [np.ones_like(t)]
else:
cols = []
for i in range(1, nterms + 1):
cols.append(np.sin(2 * np.pi * i * frequency * t))
cols.append(np.cos(2 * np.pi * i * frequency * t))
XT = np.vstack(cols)
if dy is not None:
XT /= dy
return np.transpose(XT)
def periodic_fit(t, y, dy, frequency, t_fit, center_data=True, fit_mean=True, nterms=1):
"""Compute the Lomb-Scargle model fit at a given frequency
Parameters
----------
t, y, dy : float or array-like
The times, observations, and uncertainties to fit
frequency : float
The frequency at which to compute the model
t_fit : float or array-like
The times at which the fit should be computed
center_data : bool (default=True)
If True, center the input data before applying the fit
fit_mean : bool (default=True)
If True, include the bias as part of the model
nterms : int (default=1)
The number of Fourier terms to include in the fit
Returns
-------
y_fit : ndarray
The model fit evaluated at each value of t_fit
"""
t, y, frequency = map(np.asarray, (t, y, frequency))
if dy is None:
dy = np.ones_like(y)
else:
dy = np.asarray(dy)
t_fit = np.asarray(t_fit)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if t_fit.ndim != 1:
raise ValueError("t_fit should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency should be a scalar")
if center_data:
w = dy**-2.0
y_mean = np.dot(y, w) / w.sum()
y = y - y_mean
else:
y_mean = 0
X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms)
theta_MLE = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy))
X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms)
return y_mean + np.dot(X_fit, theta_MLE)
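# A minimal sketch (editor's illustration): for data that lie exactly in the
# model space (bias + one Fourier term), the MLE fit reproduces them exactly.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = 10 * rng.random(30)
    y = 2.0 + np.sin(2 * np.pi * 1.5 * t)
    X = design_matrix(t, 1.5)
    print("design matrix shape:", X.shape)  # (30, 3): bias + sin + cos
    y_fit = periodic_fit(t, y, dy=1, frequency=1.5, t_fit=t, center_data=False)
    print("exact fit:", np.allclose(y, y_fit))  # True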
|
47f398c7205cf62449648d2a455af646ad7e66843ced14f615d8f7e257876718 | import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.periodograms.lombscargle._statistics import (
METHODS,
fap_single,
inv_fap_single,
)
from astropy.timeseries.periodograms.lombscargle.utils import (
compute_chi2_ref,
convert_normalization,
)
from astropy.utils.compat.optional_deps import HAS_SCIPY
METHOD_KWDS = dict(bootstrap={"n_bootstraps": 20, "random_seed": 42})
NORMALIZATIONS = ["standard", "psd", "log", "model"]
def make_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0, units=False):
"""Generate some data for testing"""
rng = np.random.default_rng(rseed)
t = 5 * period * rng.random(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.random(N))
y += dy * rng.standard_normal(N)
fmax = 5
if units:
return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
else:
return t, y, dy, fmax
def null_data(N=1000, dy=1, rseed=0, units=False):
"""Generate null hypothesis data"""
rng = np.random.default_rng(rseed)
t = 100 * rng.random(N)
dy = 0.5 * dy * (1 + rng.random(N))
y = dy * rng.standard_normal(N)
fmax = 40
if units:
return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
else:
return t, y, dy, fmax
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
@pytest.mark.parametrize("with_errors", [True, False])
@pytest.mark.parametrize("units", [False, True])
def test_distribution(normalization, with_errors, units):
t, y, dy, fmax = null_data(units=units)
if not with_errors:
dy = None
ls = LombScargle(t, y, dy, normalization=normalization)
freq, power = ls.autopower(maximum_frequency=fmax)
z = np.linspace(0, power.max(), 1000)
# Test that pdf and cdf are consistent
dz = z[1] - z[0]
z_mid = z[:-1] + 0.5 * dz
pdf = ls.distribution(z_mid)
cdf = ls.distribution(z, cumulative=True)
if isinstance(dz, u.Quantity):
dz = dz.value
assert_allclose(pdf, np.diff(cdf) / dz, rtol=1e-5, atol=1e-8)
# psd normalization without specified errors produces bad results
if not (normalization == "psd" and not with_errors):
# Test that observed power is distributed according to the theoretical pdf
hist, bins = np.histogram(power, 30, density=True)
midpoints = 0.5 * (bins[1:] + bins[:-1])
pdf = ls.distribution(midpoints)
assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
@pytest.mark.parametrize("N", [10, 100, 1000])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_inverse_single(N, normalization):
fap = np.linspace(0, 1, 11)
z = inv_fap_single(fap, N, normalization)
fap_out = fap_single(z, N, normalization)
assert_allclose(fap, fap_out)
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
@pytest.mark.parametrize("use_errs", [True, False])
@pytest.mark.parametrize("units", [False, True])
def test_inverse_bootstrap(normalization, use_errs, units):
t, y, dy, fmax = null_data(units=units)
if not use_errs:
dy = None
fap = np.linspace(0, 1, 11)
method = "bootstrap"
method_kwds = METHOD_KWDS["bootstrap"]
ls = LombScargle(t, y, dy, normalization=normalization)
z = ls.false_alarm_level(
fap, maximum_frequency=fmax, method=method, method_kwds=method_kwds
)
fap_out = ls.false_alarm_probability(
z, maximum_frequency=fmax, method=method, method_kwds=method_kwds
)
# atol = 1 / n_bootstraps
assert_allclose(fap, fap_out, atol=0.05)
@pytest.mark.parametrize("method", sorted(set(METHODS) - {"bootstrap"}))
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
@pytest.mark.parametrize("use_errs", [True, False])
@pytest.mark.parametrize("N", [10, 100, 1000])
@pytest.mark.parametrize("units", [False, True])
def test_inverses(method, normalization, use_errs, N, units, T=5):
if not HAS_SCIPY and method in ["baluev", "davies"]:
pytest.skip("SciPy required")
t, y, dy, fmax = make_data(N, rseed=543, units=units)
if not use_errs:
dy = None
method_kwds = METHOD_KWDS.get(method, None)
fap = np.logspace(-10, 0, 11)
ls = LombScargle(t, y, dy, normalization=normalization)
z = ls.false_alarm_level(
fap, maximum_frequency=fmax, method=method, method_kwds=method_kwds
)
fap_out = ls.false_alarm_probability(
z, maximum_frequency=fmax, method=method, method_kwds=method_kwds
)
assert_allclose(fap, fap_out)
@pytest.mark.parametrize("method", sorted(METHODS))
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
@pytest.mark.parametrize("units", [False, True])
def test_false_alarm_smoketest(method, normalization, units):
if not HAS_SCIPY and method in ["baluev", "davies"]:
pytest.skip("SciPy required")
kwds = METHOD_KWDS.get(method, None)
t, y, dy, fmax = make_data(rseed=42, units=units)
ls = LombScargle(t, y, dy, normalization=normalization)
freq, power = ls.autopower(maximum_frequency=fmax)
Z = np.linspace(power.min(), power.max(), 30)
fap = ls.false_alarm_probability(
Z, maximum_frequency=fmax, method=method, method_kwds=kwds
)
assert len(fap) == len(Z)
if method != "davies":
assert np.all(fap <= 1)
assert np.all(fap[:-1] >= fap[1:])  # monotonically non-increasing
@pytest.mark.parametrize("method", sorted(METHODS))
@pytest.mark.parametrize("use_errs", [True, False])
@pytest.mark.parametrize("normalization", sorted(set(NORMALIZATIONS) - {"psd"}))
@pytest.mark.parametrize("units", [False, True])
def test_false_alarm_equivalence(method, normalization, use_errs, units):
# Note: the PSD normalization is not equivalent to the others, in that it
# depends on the absolute errors rather than relative errors. Because the
# scaling contributes to the distribution, it cannot be converted directly
# from any of the three normalized versions.
if not HAS_SCIPY and method in ["baluev", "davies"]:
pytest.skip("SciPy required")
kwds = METHOD_KWDS.get(method, None)
t, y, dy, fmax = make_data(units=units)
if not use_errs:
dy = None
ls = LombScargle(t, y, dy, normalization=normalization)
freq, power = ls.autopower(maximum_frequency=fmax)
Z = np.linspace(power.min(), power.max(), 30)
fap = ls.false_alarm_probability(
Z, maximum_frequency=fmax, method=method, method_kwds=kwds
)
# Compute the equivalent Z values in the standard normalization
# and check that the FAP is consistent
Z_std = convert_normalization(
Z,
len(t),
from_normalization=normalization,
to_normalization="standard",
chi2_ref=compute_chi2_ref(y, dy),
)
ls = LombScargle(t, y, dy, normalization="standard")
fap_std = ls.false_alarm_probability(
Z_std, maximum_frequency=fmax, method=method, method_kwds=kwds
)
assert_allclose(fap, fap_std, rtol=0.1)
|
9f50d83a8a9afaf70969aed3e5d211b534edc37933b97bf8b9fc2d489de1338e | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.timeseries.periodograms.lombscargle.core import LombScargle
from astropy.timeseries.periodograms.lombscargle.utils import (
compute_chi2_ref,
convert_normalization,
)
NORMALIZATIONS = ["standard", "model", "log", "psd"]
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.default_rng(rseed)
t = 5 * period * rng.random(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.random(N))
y += dy * rng.standard_normal(N)
return t, y, dy
@pytest.mark.parametrize("norm_in", NORMALIZATIONS)
@pytest.mark.parametrize("norm_out", NORMALIZATIONS)
def test_convert_normalization(norm_in, norm_out, data):
t, y, dy = data
_, power_in = LombScargle(t, y, dy).autopower(
maximum_frequency=5, normalization=norm_in
)
_, power_out = LombScargle(t, y, dy).autopower(
maximum_frequency=5, normalization=norm_out
)
power_in_converted = convert_normalization(
power_in,
N=len(t),
from_normalization=norm_in,
to_normalization=norm_out,
chi2_ref=compute_chi2_ref(y, dy),
)
assert_allclose(power_in_converted, power_out)
|
c9609998a7e4cced37bff9377321cee96b02b2f8ce752df123fbc35c2ddf82e4 | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.lombscargle import LombScargle
ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS if method != "auto"]
FAST_METHODS = [method for method in ALL_METHODS if "fast" in method]
NTERMS_METHODS = [method for method in ALL_METHODS if "chi2" in method]
NORMALIZATIONS = ["standard", "psd", "log", "model"]
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.default_rng(rseed)
t = 20 * period * rng.random(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.random(N))
y += dy * rng.standard_normal(N)
return t, y, dy
@pytest.mark.parametrize("minimum_frequency", [None, 1.0])
@pytest.mark.parametrize("maximum_frequency", [None, 5.0])
@pytest.mark.parametrize("nyquist_factor", [1, 10])
@pytest.mark.parametrize("samples_per_peak", [1, 5])
def test_autofrequency(
data, minimum_frequency, maximum_frequency, nyquist_factor, samples_per_peak
):
t, y, dy = data
baseline = t.max() - t.min()
freq = LombScargle(t, y, dy).autofrequency(
samples_per_peak, nyquist_factor, minimum_frequency, maximum_frequency
)
df = freq[1] - freq[0]
# Check sample spacing
assert_allclose(df, 1.0 / baseline / samples_per_peak)
# Check minimum frequency
if minimum_frequency is None:
assert_allclose(freq[0], 0.5 * df)
else:
assert_allclose(freq[0], minimum_frequency)
if maximum_frequency is None:
avg_nyquist = 0.5 * len(t) / baseline
assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5 * df)
else:
assert_allclose(freq[-1], maximum_frequency, atol=0.5 * df)
@pytest.mark.parametrize("method", ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("fit_mean", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_all_methods(
data, method, center_data, fit_mean, errors, with_units, normalization
):
if method == "scipy" and (fit_mean or errors != "none"):
return
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
kwds = {}
ls = LombScargle(
t,
y,
dy,
center_data=center_data,
fit_mean=fit_mean,
normalization=normalization,
)
P_expected = ls.power(frequency)
# don't use the fft approximation here; we'll test this elsewhere
if method in FAST_METHODS:
kwds["method_kwds"] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
if with_units:
if normalization == "psd" and errors == "none":
assert P_method.unit == y.unit**2
else:
assert P_method.unit == u.dimensionless_unscaled
else:
assert not hasattr(P_method, "unit")
assert_quantity_allclose(P_expected, P_method)
@pytest.mark.parametrize("method", ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("fit_mean", [True, False])
@pytest.mark.parametrize("with_errors", [True, False])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_integer_inputs(
data, method, center_data, fit_mean, with_errors, normalization
):
if method == "scipy" and (fit_mean or with_errors):
return
t, y, dy = data
t = np.floor(100 * t)
t_int = t.astype(int)
y = np.floor(100 * y)
y_int = y.astype(int)
dy = np.floor(100 * dy)
dy_int = dy.astype("int32")
frequency = 1e-2 * (0.8 + 0.01 * np.arange(40))
if not with_errors:
dy = None
dy_int = None
kwds = dict(center_data=center_data, fit_mean=fit_mean, normalization=normalization)
P_float = LombScargle(t, y, dy, **kwds).power(frequency, method=method)
P_int = LombScargle(t_int, y_int, dy_int, **kwds).power(frequency, method=method)
assert_allclose(P_float, P_int)
@pytest.mark.parametrize("method", NTERMS_METHODS)
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("fit_mean", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("nterms", [0, 2, 4])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_nterms_methods(
method, center_data, fit_mean, errors, nterms, normalization, data
):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(
t,
y,
dy,
center_data=center_data,
fit_mean=fit_mean,
nterms=nterms,
normalization=normalization,
)
if nterms == 0 and not fit_mean:
with pytest.raises(ValueError, match=r"[nterms, blas]"):
ls.power(frequency, method=method)
else:
P_expected = ls.power(frequency)
# don't use fast fft approximations here
kwds = {}
if "fast" in method:
kwds["method_kwds"] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
assert_allclose(P_expected, P_method, rtol=1e-7, atol=1e-25)
@pytest.mark.parametrize("method", FAST_METHODS)
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("fit_mean", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("nterms", [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean, errors, nterms, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(
t,
y,
dy,
center_data=center_data,
fit_mean=fit_mean,
nterms=nterms,
normalization="standard",
)
# use only standard normalization because we compare via absolute tolerance
kwds = dict(method=method)
if method == "fast" and nterms != 1:
with pytest.raises(ValueError, match=r"nterms"):
ls.power(frequency, **kwds)
elif nterms == 0 and not fit_mean:
with pytest.raises(ValueError, match=r"[nterms, blas]"):
ls.power(frequency, **kwds)
else:
P_fast = ls.power(frequency, **kwds)
kwds["method_kwds"] = dict(use_fft=False)
P_slow = ls.power(frequency, **kwds)
assert_allclose(P_fast, P_slow, atol=0.008)
@pytest.mark.parametrize("method", LombScargle.available_methods)
@pytest.mark.parametrize("shape", [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
t, y, dy = data
freq = np.asarray(np.zeros(shape))
freq.flat = np.arange(1, freq.size + 1)
PLS = LombScargle(t, y, fit_mean=False).power(freq, method=method)
assert PLS.shape == shape
@pytest.mark.parametrize("method", LombScargle.available_methods)
def test_errors_on_unit_mismatch(method, data):
t, y, dy = data
t = t * u.second
y = y * u.mag
frequency = np.linspace(0.5, 1.5, 10)
# this should fail because frequency and 1/t units do not match
MESSAGE = r"Units of {} not equivalent"
with pytest.raises(ValueError, match=MESSAGE.format("frequency")):
LombScargle(t, y, fit_mean=False).power(frequency, method=method)
# this should fail because dy and y units do not match
with pytest.raises(ValueError, match=MESSAGE.format("dy")):
LombScargle(t, y, dy, fit_mean=False).power(frequency / t.unit)
# we don't test all normalizations here because they are tested above
# only test method='auto' because unit handling does not depend on method
@pytest.mark.parametrize("with_error", [True, False])
def test_unit_conversions(data, with_error):
t, y, dy = data
t_day = t * u.day
t_hour = u.Quantity(t_day, "hour")
y_meter = y * u.meter
y_millimeter = u.Quantity(y_meter, "millimeter")
# sanity check on inputs
assert_quantity_allclose(t_day, t_hour)
assert_quantity_allclose(y_meter, y_millimeter)
if with_error:
dy = dy * u.meter
else:
dy = None
freq_day, P1 = LombScargle(t_day, y_meter, dy).autopower()
freq_hour, P2 = LombScargle(t_hour, y_millimeter, dy).autopower()
# Check units of frequency
assert freq_day.unit == 1.0 / u.day
assert freq_hour.unit == 1.0 / u.hour
# Check that results match
assert_quantity_allclose(freq_day, freq_hour)
assert_quantity_allclose(P1, P2)
# Check that switching frequency units doesn't change things
P3 = LombScargle(t_day, y_meter, dy).power(freq_hour)
P4 = LombScargle(t_hour, y_meter, dy).power(freq_day)
assert_quantity_allclose(P3, P4)
@pytest.mark.parametrize("fit_mean", [True, False])
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("freq", [1.0, 2.0])
def test_model(fit_mean, with_units, freq):
rand = np.random.default_rng(0)
t = 10 * rand.random(40)
params = 10 * rand.random(3)
y = np.zeros_like(t)
if fit_mean:
y += params[0]
y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))
if with_units:
t = t * u.day
y = y * u.mag
freq = freq / u.day
ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
y_fit = ls.model(t, freq)
assert_quantity_allclose(y_fit, y)
@pytest.mark.parametrize("t_unit", [u.second, u.day])
@pytest.mark.parametrize("frequency_unit", [u.Hz, 1.0 / u.second])
@pytest.mark.parametrize("y_unit", [u.mag, u.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
t, y, dy = data
t_fit = t[:5]
frequency = 1.0
t = t * t_unit
t_fit = t_fit * t_unit
y = y * y_unit
dy = dy * y_unit
frequency = frequency * frequency_unit
ls = LombScargle(t, y, dy)
y_fit = ls.model(t_fit, frequency)
assert y_fit.unit == y_unit
def test_model_units_mismatch(data):
t, y, dy = data
frequency = 1.0
t_fit = t[:5]
t = t * u.second
t_fit = t_fit * u.second
y = y * u.mag
frequency = 1.0 / t.unit
# this should fail because frequency and 1/t units do not match
MESSAGE = r"Units of {} not equivalent"
with pytest.raises(ValueError, match=MESSAGE.format("frequency")):
LombScargle(t, y).model(t_fit, frequency=1.0)
# this should fail because t and t_fit units do not match
with pytest.raises(ValueError, match=MESSAGE.format("t")):
LombScargle(t, y).model([1, 2], frequency)
# this should fail because dy and y units do not match
with pytest.raises(ValueError, match=MESSAGE.format("dy")):
LombScargle(t, y, dy).model(t_fit, frequency)
def test_autopower(data):
t, y, dy = data
ls = LombScargle(t, y, dy)
kwargs = dict(
samples_per_peak=6,
nyquist_factor=2,
minimum_frequency=2,
maximum_frequency=None,
)
freq1 = ls.autofrequency(**kwargs)
power1 = ls.power(freq1)
freq2, power2 = ls.autopower(**kwargs)
assert_allclose(freq1, freq2)
assert_allclose(power1, power2)
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("fit_mean", [True, False])
@pytest.mark.parametrize("nterms", [0, 1, 2])
def test_model_parameters(data, nterms, fit_mean, center_data, errors, with_units):
if nterms == 0 and not fit_mean:
return
t, y, dy = data
frequency = 1.5
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(
t, y, dy, nterms=nterms, fit_mean=fit_mean, center_data=center_data
)
tfit = np.linspace(0, 20, 10)
if with_units:
tfit = tfit * u.day
model = ls.model(tfit, frequency)
params = ls.model_parameters(frequency)
design = ls.design_matrix(frequency, t=tfit)
offset = ls.offset()
assert len(params) == int(fit_mean) + 2 * nterms
assert_quantity_allclose(offset + design.dot(params), model)
@pytest.mark.parametrize("timedelta", [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.0
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same
start = Time("2019-05-04T12:34:56")
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of LombScargle, one with absolute and one
# with relative times.
ls1 = LombScargle(t, y, dy)
ls2 = LombScargle(trel, y, dy)
kwargs = dict(
samples_per_peak=6,
nyquist_factor=2,
minimum_frequency=2 / u.day,
maximum_frequency=None,
)
freq1 = ls1.autofrequency(**kwargs)
freq2 = ls2.autofrequency(**kwargs)
assert_quantity_allclose(freq1, freq2)
power1 = ls1.power(freq1)
power2 = ls2.power(freq2)
assert_quantity_allclose(power1, power2)
freq1, power1 = ls1.autopower(**kwargs)
freq2, power2 = ls2.autopower(**kwargs)
assert_quantity_allclose(freq1, freq2)
assert_quantity_allclose(power1, power2)
model1 = ls1.model(t, 2 / u.day)
model2 = ls2.model(trel, 2 / u.day)
assert_quantity_allclose(model1, model2)
# Check model validation
MESSAGE = (
r"t was provided as {} time but the LombScargle class was initialized with {}"
r" times."
)
with pytest.raises(TypeError, match=MESSAGE.format("a relative", "absolute")):
ls1.model(trel, 2 / u.day)
with pytest.raises(TypeError, match=MESSAGE.format("an absolute", "relative")):
ls2.model(t, 2 / u.day)
# Check design matrix
design1 = ls1.design_matrix(2 / u.day, t=t)
design2 = ls2.design_matrix(2 / u.day, t=trel)
assert_quantity_allclose(design1, design2)
# Check design matrix validation
with pytest.raises(TypeError, match=MESSAGE.format("a relative", "absolute")):
ls1.design_matrix(2 / u.day, t=trel)
with pytest.raises(TypeError, match=MESSAGE.format("an absolute", "relative")):
ls2.design_matrix(2 / u.day, t=t)
|
88c970795d5f769b57fc61d1e53f6e288541f24b18eefd8a7137939da081eb54 | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy.timeseries.periodograms.lombscargle.implementations.utils import (
bitceil,
extirpolate,
trig_sum,
)
@pytest.mark.parametrize("N", 2 ** np.arange(1, 12))
@pytest.mark.parametrize("offset", [-1, 0, 1])
def test_bitceil(N, offset):
assert_equal(bitceil(N + offset), int(2 ** np.ceil(np.log2(N + offset))))
@pytest.fixture
def extirpolate_data():
rng = np.random.default_rng(0)
x = 100 * rng.random(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize("N", [100, None])
@pytest.mark.parametrize("M", [5])
def test_extirpolate(N, M, extirpolate_data):
x, y, f = extirpolate_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.5e-5)
@pytest.fixture
def extirpolate_int_data():
rng = np.random.default_rng(0)
x = 100 * rng.random(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize("N", [100, None])
@pytest.mark.parametrize("M", [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
x, y, f = extirpolate_int_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.7e-5)
@pytest.fixture
def trig_sum_data():
rng = np.random.default_rng(0)
t = 10 * rng.random(50)
h = np.sin(t)
return t, h
@pytest.mark.parametrize("f0", [0, 1])
@pytest.mark.parametrize("adjust_t", [True, False])
@pytest.mark.parametrize("freq_factor", [1, 2])
@pytest.mark.parametrize("df", [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
t, h = trig_sum_data
tfit = t - t.min() if adjust_t else t
S1, C1 = trig_sum(
tfit,
h,
df,
N=1000,
use_fft=True,
f0=f0,
freq_factor=freq_factor,
oversampling=10,
)
S2, C2 = trig_sum(
tfit,
h,
df,
N=1000,
use_fft=False,
f0=f0,
freq_factor=freq_factor,
oversampling=10,
)
assert_allclose(S1, S2, atol=1e-2)
assert_allclose(C1, C2, atol=1e-2)
|
df343e6da494f06169d5c36b29f539b4c5f9a0b25bb039ae22bb86a06d28278f | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.timeseries.periodograms.lombscargle.implementations.mle import (
design_matrix,
periodic_fit,
)
@pytest.fixture
def t():
rand = np.random.default_rng(42)
return 10 * rand.random(10)
@pytest.mark.parametrize("freq", [1.0, 2])
@pytest.mark.parametrize("dy", [None, 2.0])
@pytest.mark.parametrize("bias", [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1.0 / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize("nterms", range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1.0 / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize("nterms", range(1, 4))
@pytest.mark.parametrize("freq", [1, 2])
@pytest.mark.parametrize("fit_mean", [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.default_rng(42)
t = 10 * rand.random(30)
theta = -1 + rand.random(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(
t,
y,
dy=1,
frequency=freq,
t_fit=t,
nterms=nterms,
center_data=False,
fit_mean=fit_mean,
)
assert_allclose(y, y_fit)
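# The generating model above is
#     y(t) = theta_0 + sum_i [theta_{2i-1} sin(2 pi i f t) + theta_{2i} cos(2 pi i f t)]
# (the theta_0 term only when fit_mean=True). Because the data are drawn
# exactly from this model, the least-squares solution in periodic_fit
# reproduces y to round-off, hence the bare assert_allclose.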
|
2b439e8f29511589543b285cb2fe5f6e914fa6f217b614d975d4f9d6ab6af0bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from unittest import mock
import pytest
from astropy.io.fits import BinTableHDU, HDUList, Header, PrimaryHDU
from astropy.timeseries.io.kepler import kepler_fits_reader
from astropy.utils.data import get_pkg_data_filename
def fake_header(extver, version, timesys, telescop):
return Header(
{
"SIMPLE": "T",
"BITPIX": 8,
"NAXIS": 0,
"EXTVER": extver,
"VERSION": version,
"TIMESYS": f"{timesys}",
"TELESCOP": f"{telescop}",
}
)
def fake_hdulist(extver=1, version=2, timesys="TDB", telescop="KEPLER"):
new_header = fake_header(extver, version, timesys, telescop)
return [
HDUList(
hdus=[
PrimaryHDU(header=new_header),
BinTableHDU(header=new_header, name="LIGHTCURVE"),
]
)
]
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(telescop="MadeUp"))
def test_raise_telescop_wrong(mock_file):
with pytest.raises(
NotImplementedError,
match=(
r"MadeUp is not implemented, only KEPLER or TESS are supported through this"
r" reader"
),
):
kepler_fits_reader(None)
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(extver=2))
def test_raise_extversion_kepler(mock_file):
with pytest.raises(
NotImplementedError, match=r"Support for KEPLER v2 files not yet implemented"
):
kepler_fits_reader(None)
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(extver=2, telescop="TESS"))
def test_raise_extversion_tess(mock_file):
with pytest.raises(
NotImplementedError, match=r"Support for TESS v2 files not yet implemented"
):
kepler_fits_reader(None)
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(timesys="TCB"))
def test_raise_timesys_kepler(mock_file):
with pytest.raises(
NotImplementedError,
match=r"Support for TCB time scale not yet implemented in KEPLER reader",
):
kepler_fits_reader(None)
@mock.patch(
"astropy.io.fits.open", side_effect=fake_hdulist(timesys="TCB", telescop="TESS")
)
def test_raise_timesys_tess(mock_file):
with pytest.raises(
NotImplementedError,
match=r"Support for TCB time scale not yet implemented in TESS reader",
):
kepler_fits_reader(None)
@pytest.mark.remote_data(source="astropy")
def test_kepler_astropy():
from astropy.units import UnitsWarning
filename = get_pkg_data_filename("timeseries/kplr010666592-2009131110544_slc.fits")
with pytest.warns(UnitsWarning):
timeseries = kepler_fits_reader(filename)
assert timeseries["time"].format == "isot"
assert timeseries["time"].scale == "tdb"
assert timeseries["sap_flux"].unit.to_string() == "electron / s"
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source="astropy")
def test_tess_astropy():
filename = get_pkg_data_filename(
"timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits"
)
with pytest.warns(UserWarning, match="Ignoring 815 rows with NaN times"):
timeseries = kepler_fits_reader(filename)
assert timeseries["time"].format == "isot"
assert timeseries["time"].scale == "tdb"
assert timeseries["sap_flux"].unit.to_string() == "electron / s"
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
|
a9bc321b0593ce46a3d2a7d4d7458d05140db961808955a114513ddd108215d2 | import pytest
import astropy.constants as const
from astropy import astronomical_constants, physical_constants
def test_version_match():
pversion = physical_constants.get()
refpversion = const.h.__class__.__name__.lower()
assert pversion == refpversion
aversion = astronomical_constants.get()
refaversion = const.M_sun.__class__.__name__.lower()
assert aversion == refaversion
def test_previously_imported():
with pytest.raises(RuntimeError):
physical_constants.set("codata2018")
with pytest.raises(RuntimeError):
astronomical_constants.set("iau2015")
|
0c680475adf40509ead694d191b200fa02460433e838ac39327e964fee5595f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import constants as const
from astropy.tests.helper import check_pickling_recovery, pickle_protocol # noqa: F401
originals = [
const.Constant("h_fake", "Not Planck", 0.0, "J s", 0.0, "fakeref", system="si"),
const.h,
const.e.si,
]
@pytest.mark.parametrize("original", originals)
def test_new_constant(pickle_protocol, original): # noqa: F811
check_pickling_recovery(original, pickle_protocol)
|
90a4eb22ce46df70f2c9a713cca02e6c44107e9d4e8cdec7492fd3501b61e01a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import numpy as np
import pytest
from astropy.constants import Constant
from astropy.units import Quantity as Q
def test_c():
from astropy.constants.codata2010 import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from astropy.constants import h as h_current
from astropy.constants.codata2010 import h
# check that the value is the CODATA2010 value
assert abs(h.value - 6.62606957e-34) < 1e-43
assert abs(h.si.value - 6.62606957e-34) < 1e-43
assert abs(h.cgs.value - 6.62606957e-27) < 1e-36
# Check it is different than the current value
assert abs(h.value - h_current.value) > 4e-42
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty
assert h.name
assert h.reference
assert h.unit
def test_e():
from astropy.constants.astropyconst13 import e as e_13
# A test quantity
E = Q(100.00000348276221, "V/m")
# e.cgs is too ambiguous and should not work at all
with pytest.raises(TypeError):
e_13.cgs * E
assert isinstance(e_13.si, Q)
assert isinstance(e_13.gauss, Q)
assert isinstance(e_13.esu, Q)
assert e_13.gauss * E == Q(e_13.gauss.value * E.value, "Fr V/m")
assert e_13.esu * E == Q(e_13.esu.value * E.value, "Fr V/m")
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from astropy.constants.astropyconst13 import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == "acceleration"
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from astropy import units as u
from astropy.constants.astropyconst13 import b_wien
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
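# Wien's displacement law: lambda_max = b_wien / T; for the solar effective
# temperature T = 5778 K the blackbody spectrum peaks near 502 nm.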
def test_pc():
"""Parsec is defined to use small-angle limit per IAU 2015 Resolution B 2.
iau2012 version still uses tan(parallax).
"""
from astropy import units as u
from astropy.constants import iau2012
plx = np.radians(1 / 3600)
assert np.allclose(
u.pc.to("m") / iau2012.pc.si.value, np.tan(plx) / plx, rtol=1.0e-14, atol=0
)
def test_masses():
"""Ensure mass values are set up correctly.
https://github.com/astropy/astropy/issues/8920
"""
from astropy.constants import astropyconst13, astropyconst20, astropyconst40
ref_text = "Allen's Astrophysical Quantities 4th Ed."
assert (
astropyconst13.M_sun.reference == ref_text
and astropyconst13.M_jup.reference == ref_text
and astropyconst13.M_earth.reference == ref_text
)
ref_text = "IAU 2015 Resolution B 3 + CODATA 2014"
assert (
astropyconst20.M_sun.reference == ref_text
and astropyconst20.M_jup.reference == ref_text
and astropyconst20.M_earth.reference == ref_text
)
ref_text = "IAU 2015 Resolution B 3 + CODATA 2018"
assert (
astropyconst40.M_sun.reference == ref_text
and astropyconst40.M_jup.reference == ref_text
and astropyconst40.M_earth.reference == ref_text
)
def test_unit():
from astropy import units as u
from astropy.constants import astropyconst13 as const
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
# invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from astropy import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from astropy.constants import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, "reference")
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, "reference")
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
|
6a5197153bb6bc5bf23fd01ceaebba730332929b8cd3008a2c6c00e1f276a886 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pytest
from astropy.constants import Constant
from astropy.units import Quantity as Q
def test_c():
from astropy.constants import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from astropy.constants import h
# check that the value is fairly close to what it should be (not exactly
# checking because this might get updated in the future)
assert abs(h.value - 6.626e-34) < 1e-38
assert abs(h.si.value - 6.626e-34) < 1e-38
assert abs(h.cgs.value - 6.626e-27) < 1e-31
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty == 0 # CODATA 2018 set h to exact value
assert h.name
assert h.reference
assert h.unit
def test_e():
"""Tests for #572 demonstrating how EM constants should behave."""
from astropy.constants import e
# A test quantity
E = Q(100, "V/m")
# Without specifying a system e should not combine with other quantities
pytest.raises(TypeError, lambda: e * E)
# Try it again (as regression test on a minor issue mentioned in #745 where
# repeated attempts to use e in an expression resulted in UnboundLocalError
# instead of TypeError)
pytest.raises(TypeError, lambda: e * E)
# e.cgs is too ambiguous and should not work at all
pytest.raises(TypeError, lambda: e.cgs * E)
assert isinstance(e.si, Q)
assert isinstance(e.gauss, Q)
assert isinstance(e.esu, Q)
assert e.si * E == Q(100, "eV/m")
assert e.gauss * E == Q(e.gauss.value * E.value, "Fr V/m")
assert e.esu * E == Q(e.esu.value * E.value, "Fr V/m")
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from astropy.constants import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == "acceleration"
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from astropy import units as u
from astropy.constants import b_wien
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
def test_unit():
from astropy import constants as const
from astropy import units as u
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
# invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from astropy import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from astropy.constants import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, "reference")
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, "reference")
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
|
f3fc4c5c0c1e84463f0fb770d68b83a38bb7373c62a40f22c519e50568fe3bb1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Comparison functions for `astropy.cosmology.Cosmology`.
This module is **NOT** public API. To use these functions, import them from
the top-level namespace -- :mod:`astropy.cosmology`.
This module will be moved.
"""
from __future__ import annotations
import functools
import inspect
from typing import Any, Callable, Tuple, Union
import numpy as np
from numpy import False_, True_, ndarray
from astropy import table
from astropy.cosmology.core import Cosmology
__all__ = [] # Nothing is scoped here
##############################################################################
# PARAMETERS
_FormatType = Union[bool, None, str]
_FormatsT = Union[_FormatType, Tuple[_FormatType, ...]]
_CompFnT = Callable[[Any, _FormatType], Cosmology]
_COSMO_AOK: set[Any] = {None, True_, False_, "astropy.cosmology"}
# The numpy bool also catches real bool for ops "==" and "in"
##############################################################################
# UTILITIES
class _CosmologyWrapper:
"""
A private wrapper class to hide things from :mod:`numpy`.
This should never be exposed to the user.
"""
__slots__ = ("wrapped",)
    # Use less memory and speed up initialization.
_cantbroadcast: tuple[type, ...] = (table.Row, table.Table)
"""
Have to deal with things that do not broadcast well. e.g.
    `~astropy.table.Row` cannot be used in an array, even if ``dtype=object``,
and will raise a segfault when used in a `numpy.ufunc`.
"""
wrapped: Any
def __init__(self, wrapped: Any) -> None:
self.wrapped = wrapped
# TODO! when py3.9+ use @functools.partial(np.frompyfunc, nin=2, nout=1)
# TODO! https://github.com/numpy/numpy/issues/9477 segfaults on astropy.row
# and np.vectorize can't coerce table to dtypes
def _wrap_to_ufunc(nin: int, nout: int) -> Callable[[_CompFnT], np.ufunc]:
def wrapper(pyfunc: _CompFnT) -> np.ufunc:
ufunc = np.frompyfunc(pyfunc, 2, 1)
return ufunc
return wrapper
@_wrap_to_ufunc(2, 1)
def _parse_format(cosmo: Any, format: _FormatType, /) -> Cosmology:
"""Parse Cosmology-like input into Cosmologies, given a format hint.
Parameters
----------
cosmo : |Cosmology|-like, positional-only
|Cosmology| to parse.
format : bool or None or str, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
|Cosmology| or generator thereof
Raises
------
TypeError
If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
    ValueError
        If ``cosmo`` is a |Cosmology| and ``format`` is not `None`, `True`,
        `False`, or ``"astropy.cosmology"``.
"""
# Deal with private wrapper
if isinstance(cosmo, _CosmologyWrapper):
cosmo = cosmo.wrapped
# Shortcut if already a cosmology
if isinstance(cosmo, Cosmology):
if format not in _COSMO_AOK:
allowed = "/".join(map(str, _COSMO_AOK))
raise ValueError(
f"for parsing a Cosmology, 'format' must be {allowed}, not {format}"
)
return cosmo
# Convert, if allowed.
elif format == False_: # catches False and False_
raise TypeError(
f"if 'format' is False, arguments must be a Cosmology, not {cosmo}"
)
else:
format = None if format == True_ else format # str->str, None/True/True_->None
out = Cosmology.from_format(cosmo, format=format) # this can error!
return out
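# Usage sketch (illustrative): because ``_parse_format`` is built with
# ``np.frompyfunc(pyfunc, 2, 1)``, it broadcasts element-wise over object
# arrays, e.g.
#     cosmos = np.array([a_cosmology, a_mapping], dtype=object)
#     formats = np.array([None, True], dtype=object)
#     _parse_format(cosmos, formats)  # -> object ndarray of Cosmology
# where ``a_cosmology`` and ``a_mapping`` are placeholder names.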
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
``format`` is broadcast to match the shape of the cosmology arguments. Note
that the cosmology arguments are not broadcast against ``format``, so it
cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by the corresponding ``format``.
format : bool or None or str or array-like thereof, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting. Note ``format`` is broadcast as an object array to match the
shape of ``cosmos`` so ``format`` cannot determine the output shape.
Raises
------
TypeError
If any in ``cosmos`` is not a |Cosmology| and the corresponding
``format`` equals `False`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
towrap = (isinstance(cosmo, _CosmologyWrapper._cantbroadcast) for cosmo in cosmos)
wcosmos = [
c if not wrap else _CosmologyWrapper(c) for c, wrap in zip(cosmos, towrap)
]
return _parse_format(wcosmos, formats)
def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
"""
sig = inspect.signature(pyfunc)
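    # Count the positional-only parameters (``p.kind == 0`` is
    # ``inspect.Parameter.POSITIONAL_ONLY``); ``wrapper`` below rejects calls
    # with more positional cosmology arguments than this.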
nin = sum(p.kind == 0 for p in sig.parameters.values())
# Make wrapper function that parses cosmology-like inputs
@functools.wraps(pyfunc)
def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool:
if len(cosmos) > nin:
raise TypeError(
f"{wrapper.__wrapped__.__name__} takes {nin} positional"
f" arguments but {len(cosmos)} were given"
)
# Parse cosmologies to format. Only do specified number.
cosmos = _parse_formats(*cosmos, format=format)
# Evaluate pyfunc, erroring if didn't match specified number.
result = wrapper.__wrapped__(*cosmos, **kwargs)
        # Return, casting to the correct type if casting is possible.
return result
return wrapper
##############################################################################
# COMPARISON FUNCTIONS
@_comparison_decorator
def cosmology_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise equality check on the cosmologies.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
Examples
--------
Assuming the following imports
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
Two identical cosmologies are equal.
>>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmology_equal(cosmo1, cosmo2)
True
And cosmologies with different parameters are not.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
>>> cosmology_equal(cosmo1, cosmo3)
False
Two cosmologies may be equivalent even if not of the same class. In these
examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
to the same value calculated in |FlatLambdaCDM|.
>>> from astropy.cosmology import LambdaCDM
>>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmology_equal(cosmo1, cosmo3)
False
>>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
False
Also, using the keyword argument, the notion of equality is extended to any
Python object that can be converted to a |Cosmology|.
>>> mapping = cosmo2.to_format("mapping")
>>> cosmology_equal(cosmo1, mapping, format=True)
True
Either (or both) arguments can be |Cosmology|-like.
>>> cosmology_equal(mapping, cosmo2, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be checked
with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of these
kinds can still be checked for equality, but the correct format string must
be used.
>>> yml = cosmo2.to_format("yaml")
>>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
True
This also works with an array of ``format`` matching the number of
cosmologies.
>>> cosmology_equal(mapping, yml, format=[True, "yaml"])
True
"""
# Check parameter equality
if not allow_equivalent:
eq = cosmo1 == cosmo2
else:
# Check parameter equivalence
# The options are: 1) same class & parameters; 2) same class, different
# parameters; 3) different classes, equivalent parameters; 4) different
# classes, different parameters. (1) & (3) => True, (2) & (4) => False.
eq = cosmo1.__equiv__(cosmo2)
if eq is NotImplemented:
eq = cosmo2.__equiv__(cosmo1) # that failed, try from 'other'
eq = False if eq is NotImplemented else eq
# TODO! include equality check of metadata
return eq
@_comparison_decorator
def _cosmology_not_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise cosmology non-equality check.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a Cosmology. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. ``format`` is
broadcast to match the shape of the cosmology arguments. Note that the
cosmology arguments are not broadcast against ``format``, so it cannot
determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
See Also
--------
astropy.cosmology.cosmology_equal
Element-wise equality check, with argument conversion to Cosmology.
"""
neq = not cosmology_equal(cosmo1, cosmo2, allow_equivalent=allow_equivalent)
# TODO! it might eventually be worth the speed boost to implement some of
# the internals of cosmology_equal here, but for now it's a hassle.
return neq
|
e38938de588dabaaf72d8a438fe2d86ad24965610a96daa0bda8860ae7e14ff6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for `astropy.cosmology`."""
from .comparison import cosmology_equal
# _z_at_scalar_value is imported for backwards compat
from .optimize import _z_at_scalar_value, z_at_value
__all__ = ["z_at_value", "cosmology_equal"]
|
f6f35dfb24c755f31945b957d27d4825e03b9e816550bd9b8dbfba449dfbbe3a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Convenience functions for `astropy.cosmology`.
"""
import warnings
import numpy as np
from astropy.cosmology import units as cu
from astropy.cosmology.core import CosmologyError
from astropy.units import Quantity
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["z_at_value"]
__doctest_requires__ = {"*": ["scipy"]}
def _z_at_scalar_value(
func,
fval,
zmin=1e-8,
zmax=1000,
ztol=1e-8,
maxfun=500,
method="Brent",
bracket=None,
verbose=False,
):
"""
Find the redshift ``z`` at which ``func(z) = fval``.
See :func:`astropy.cosmology.funcs.z_at_value`.
"""
from scipy.optimize import minimize_scalar
opt = {"maxiter": maxfun}
# Assume custom methods support the same options as default; otherwise user
# will see warnings.
if str(method).lower() == "bounded":
opt["xatol"] = ztol
if bracket is not None:
warnings.warn(f"Option 'bracket' is ignored by method {method}.")
bracket = None
else:
opt["xtol"] = ztol
# fval falling inside the interval of bracketing function values does not
    # guarantee a unique solution, but for standard cosmological quantities it
    # normally does (they are monotonic or have a single extremum).
    # In these cases, keep the solver from returning solutions outside the bracket.
fval_zmin, fval_zmax = func(zmin), func(zmax)
nobracket = False
if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
if bracket is None:
nobracket = True
else:
fval_brac = func(np.asanyarray(bracket))
if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval):
nobracket = True
else:
zmin, zmax = bracket[0], bracket[-1]
fval_zmin, fval_zmax = fval_brac[[0, -1]]
if nobracket:
warnings.warn(
f"fval is not bracketed by func(zmin)={fval_zmin} and "
f"func(zmax)={fval_zmax}. This means either there is no "
"solution, or that there is more than one solution "
"between zmin and zmax satisfying fval = func(z).",
AstropyUserWarning,
)
if isinstance(fval_zmin, Quantity):
val = fval.to_value(fval_zmin.unit)
else:
val = fval
# 'Brent' and 'Golden' ignore `bounds`, force solution inside zlim
def f(z):
if z > zmax:
return 1.0e300 * (1.0 + z - zmax)
elif z < zmin:
return 1.0e300 * (1.0 + zmin - z)
elif isinstance(fval_zmin, Quantity):
return abs(func(z).value - val)
else:
return abs(func(z) - val)
res = minimize_scalar(
f, method=method, bounds=(zmin, zmax), bracket=bracket, options=opt
)
# Scipy docs state that `OptimizeResult` always has 'status' and 'message'
# attributes, but only `_minimize_scalar_bounded()` seems to have really
# implemented them.
if not res.success:
warnings.warn(
f"Solver returned {res.get('status')}:"
f" {res.get('message', 'Unsuccessful')}\nPrecision {res.fun} reached after"
f" {res.nfev} function calls.",
AstropyUserWarning,
)
if verbose:
print(res)
if np.allclose(res.x, zmax):
raise CosmologyError(
f"Best guess z={res.x} is very close to the upper z limit {zmax}."
"\nTry re-running with a different zmax."
)
elif np.allclose(res.x, zmin):
raise CosmologyError(
f"Best guess z={res.x} is very close to the lower z limit {zmin}."
"\nTry re-running with a different zmin."
)
return res.x
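# Note on the wrapper ``f`` above: the 'Brent' and 'Golden' methods ignore the
# ``bounds`` argument of ``minimize_scalar``, so ``f`` returns a huge penalty
# (~1e300, growing with distance) outside [zmin, zmax] to keep the minimizer
# inside the search interval; minimizing |func(z) - fval| then locates the
# root of func(z) = fval.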
def z_at_value(
func,
fval,
zmin=1e-8,
zmax=1000,
ztol=1e-8,
maxfun=500,
method="Brent",
bracket=None,
verbose=False,
):
"""Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you are
trying to invert! Depending on the cosmology, there may not be a
unique solution. For example, in the standard Lambda CDM cosmology,
there are two redshifts which give an angular diameter distance of
1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the
solution you are interested in, use the ``zmin`` and ``zmax`` keywords
to limit the search range (see the example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : `~astropy.units.Quantity`
The (scalar or array) value of ``func(z)`` to recover.
zmin : float or array-like['dimensionless'] or quantity-like, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float or array-like['dimensionless'] or quantity-like, optional
The upper search limit for ``z`` (default 1000).
ztol : float or array-like['dimensionless'], optional
The relative error in ``z`` acceptable for convergence.
maxfun : int or array-like, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
method : str or callable, optional
Type of solver to pass to the minimizer. The built-in options provided
by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default),
'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any
user-provided callable object that meets the requirements listed
therein under the Notes on "Custom minimizers" - or in more detail in
:doc:`scipy:tutorial/optimize` - although their use is currently
untested.
.. versionadded:: 4.3
bracket : sequence or object array[sequence], optional
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
        z1 < z2 < z3 and ``func(z2) < func(z1), func(z3)`` or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
.. versionadded:: 4.3
verbose : bool, optional
Print diagnostic output from solver (default `False`).
.. versionadded:: 4.3
Returns
-------
z : `~astropy.units.Quantity` ['redshift']
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``. Has units of cosmological redshift.
Warns
-----
:class:`~astropy.utils.exceptions.AstropyUserWarning`
If ``fval`` is not bracketed by ``func(zmin)=fval(zmin)`` and
``func(zmax)=fval(zmax)``.
If the solver was not successful.
Raises
------
:class:`astropy.cosmology.CosmologyError`
If the result is very close to either ``zmin`` or ``zmax``.
ValueError
If ``bracket`` is not an array nor a 2 (or 3) element sequence.
TypeError
If ``bracket`` is not an object array. 2 (or 3) element sequences will
be turned into object arrays, so this error should only occur if a
non-object array is used for ``bracket``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient if you
want to invert a large number of values for the same cosmology. In this
case, it is faster to instead generate an array of values at many
closely-spaced redshifts that cover the relevant redshift range, and then
use interpolation to find the redshift at each value you are interested
in. For example, to efficiently find the redshifts corresponding to 10^6
values of the distance modulus in a Planck13 cosmology, you could do the
following:
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
Generate 10^6 distance moduli between 24 and 44 for which we
want to find the corresponding redshifts:
>>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.geomspace(zmin, zmax, 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
>>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, Planck18, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP
<Quantity 3.19812268 redshift>
The angular diameter is not monotonic however, and there are two
redshifts that give a value of 1500 Mpc. You can use the zmin and
zmax keywords to find the one you are interested in:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP
<Quantity 3.7823268 redshift>
Alternatively the ``bracket`` option may be used to initialize the
function solver on a desired region, but one should be aware that this
does not guarantee it will remain close to this starting bracket.
For the example of angular diameter distance, which has a maximum near
a redshift of 1.6 in this cosmology, defining a bracket on either side
of this maximum will often return a solution on the same side:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS
<Quantity 0.68044452 redshift>
    But this is not guaranteed, especially if the bracket is chosen too wide
and/or too close to the turning point:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
Likewise, even for the same minimizer and same starting conditions different
results can be found depending on architecture or library versions:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 0.68044452 redshift> # doctest: +SKIP
It is therefore generally safer to use the 3-parameter variant to ensure
the solution stays within the bracketing limits:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
All the arguments except ``func``, ``method`` and ``verbose`` accept array
inputs. This does NOT use interpolation tables or any method to speed up
evaluations, rather providing a convenient means to broadcast arguments
over an element-wise scalar evaluation.
The most common use case for non-scalar input is to evaluate 'func' for an
array of ``fval``:
>>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP
<Quantity [3.19812061, 0.75620443] redshift>
``fval`` can be any shape:
>>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP
<Quantity [[3.19812061, 0.75620443],
[5.67661227, 2.19131955]] redshift>
    Other arguments can be arrays. For non-monotonic functions -- for example,
the angular diameter distance -- this can be useful to find all solutions.
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP
<Quantity [0.68127747, 3.79149062] redshift>
    The ``bracket`` argument can likewise be an array. However, since
bracket must already be a sequence (or None), it MUST be given as an
object `numpy.ndarray`. Importantly, the depth of the array must be such
that each bracket subsequence is an object. Errors or unexpected results
will happen otherwise. A convenient means to ensure the right depth is by
including a length-0 tuple as a bracket and then truncating the object
array to remove the placeholder. This can be seen in the following
example:
>>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1]
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=bracket) # doctest: +SKIP
<Quantity [0.68044452, 3.7823268] redshift>
"""
# `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer`
# so we strip it of units for broadcasting and restore the units when
# passing the elements to `_z_at_scalar_value`.
fval = np.asanyarray(fval)
unit = getattr(fval, "unit", 1) # can be unitless
zmin = Quantity(zmin, cu.redshift).value # must be unitless
zmax = Quantity(zmax, cu.redshift).value
# bracket must be an object array (assumed to be correct) or a 'scalar'
# bracket: 2 or 3 elt sequence
if not isinstance(bracket, np.ndarray): # 'scalar' bracket
if bracket is not None and len(bracket) not in (2, 3):
raise ValueError(
"`bracket` is not an array nor a 2 (or 3) element sequence."
)
else: # munge bracket into a 1-elt object array
bracket = np.array([bracket, ()], dtype=object)[:1].squeeze()
if bracket.dtype != np.object_:
raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'")
# make multi-dimensional iterator for all but `method`, `verbose`
with np.nditer(
[fval, zmin, zmax, ztol, maxfun, bracket, None],
flags=["refs_ok"],
op_flags=[
*[["readonly"]] * 6, # ← inputs output ↓
["writeonly", "allocate", "no_subtype"],
],
op_dtypes=(*(None,) * 6, fval.dtype),
casting="no",
) as it:
for fv, zmn, zmx, zt, mfe, bkt, zs in it: # ← eltwise unpack & eval ↓
zs[...] = _z_at_scalar_value(
func,
fv * unit,
zmin=zmn,
zmax=zmx,
ztol=zt,
maxfun=mfe,
bracket=bkt.item(),
# not broadcasted
method=method,
verbose=verbose,
)
# since bracket is an object array, the output will be too, so it is
# cast to the same type as the function value.
result = it.operands[-1] # zs
return result << cu.redshift
|
075b96c285c75124b306739f2a03782060cdcaf3bf9ffeabb91cc49231d015a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import abc
import copy
import inspect
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.modeling import FittableModel, Model
from astropy.utils.decorators import classproperty
from .utils import convert_parameter_to_model_parameter
__all__ = [] # nothing is publicly scoped
class _CosmologyModel(FittableModel):
"""Base class for Cosmology redshift-method Models.
.. note::
This class is not publicly scoped so should not be used directly.
Instead, from a Cosmology instance use ``.to_format("astropy.model")``
to create an instance of a subclass of this class.
`_CosmologyModel` (subclasses) wrap a redshift-method of a
:class:`~astropy.cosmology.Cosmology` class, converting each non-`None`
|Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the redshift-method to the model's ``__call__ / evaluate``.
See Also
--------
astropy.cosmology.Cosmology.to_format
"""
@abc.abstractmethod
def _cosmology_class(self):
"""Cosmology class as a private attribute. Set in subclasses."""
@abc.abstractmethod
def _method_name(self):
"""Cosmology method name as a private attribute. Set in subclasses."""
@classproperty
def cosmology_class(cls):
"""|Cosmology| class."""
return cls._cosmology_class
@property
def cosmology(self):
"""Return |Cosmology| using `~astropy.modeling.Parameter` values."""
cosmo = self._cosmology_class(
name=self.name,
**{
k: (v.value if not (v := getattr(self, k)).unit else v.quantity)
for k in self.param_names
},
)
return cosmo
@classproperty
def method_name(self):
"""Redshift-method name on |Cosmology| instance."""
return self._method_name
# ---------------------------------------------------------------
def evaluate(self, *args, **kwargs):
"""Evaluate method {method!r} of {cosmo_cls!r} Cosmology.
The Model wraps the :class:`~astropy.cosmology.Cosmology` method,
converting each |Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
(unless the Parameter is None, in which case it is skipped).
Here an instance of the cosmology is created using the current
Parameter values and the method is evaluated given the input.
Parameters
----------
*args, **kwargs
The first ``n_inputs`` of ``*args`` are for evaluating the method
of the cosmology. The remaining args and kwargs are passed to the
cosmology class constructor.
        Any unspecified Cosmology Parameter uses the current value of the
corresponding Model Parameter.
Returns
-------
Any
Results of evaluating the Cosmology method.
"""
# create BoundArgument with all available inputs beyond the Parameters,
# which will be filled in next
ba = self.cosmology_class._init_signature.bind_partial(
*args[self.n_inputs :], **kwargs
)
# fill in missing Parameters
for k in self.param_names:
if k not in ba.arguments:
v = getattr(self, k)
ba.arguments[k] = v.value if not v.unit else v.quantity
# unvectorize, since Cosmology is not vectorized
# TODO! remove when vectorized
if np.shape(ba.arguments[k]): # only in __call__
# m_nu is a special case # TODO! fix by making it 'structured'
if k == "m_nu" and len(ba.arguments[k].shape) == 1:
continue
ba.arguments[k] = ba.arguments[k][0]
# make instance of cosmology
cosmo = self._cosmology_class(**ba.arguments)
# evaluate method
result = getattr(cosmo, self._method_name)(*args[: self.n_inputs])
return result
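# Usage sketch (illustrative): for a model created with
# ``Planck18.to_format("astropy.model", method="age")``, calling ``model(0.5)``
# instantiates a cosmology from the model's current Parameter values and
# returns ``cosmo.age(0.5)``.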
##############################################################################
def from_model(model):
"""Load |Cosmology| from `~astropy.modeling.Model` object.
Parameters
----------
model : `_CosmologyModel` subclass instance
        See ``Cosmology.to_format.help("astropy.model")`` for details.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
>>> from astropy.cosmology import Cosmology, Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> Cosmology.from_format(model)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
cosmology = model.cosmology_class
meta = copy.deepcopy(model.meta)
# assemble the Parameters
params = {}
for n in model.param_names:
p = getattr(model, n)
params[p.name] = p.quantity if p.unit else p.value
# put all attributes in a dict
meta[p.name] = {
n: getattr(p, n)
for n in dir(p)
if not (n.startswith("_") or callable(getattr(p, n)))
}
ba = cosmology._init_signature.bind(name=model.name, **params, meta=meta)
return cosmology(*ba.args, **ba.kwargs)
def to_model(cosmology, *_, method):
"""Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
method : str, keyword-only
The name of the method on the ``cosmology``.
Returns
-------
`_CosmologyModel` subclass instance
The Model wraps the |Cosmology| method, converting each non-`None`
:class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the method to the model's ``__call__ / evaluate``.
Examples
--------
>>> from astropy.cosmology import Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> model
<FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897,
name='Planck18')>
"""
cosmo_cls = cosmology.__class__
# get bound method & sig from cosmology (unbound if class).
if not hasattr(cosmology, method):
raise AttributeError(f"{method} is not a method on {cosmology.__class__}.")
func = getattr(cosmology, method)
if not callable(func):
raise ValueError(f"{cosmology.__class__}.{method} is not callable.")
msig = inspect.signature(func)
# introspect for number of positional inputs, ignoring "self"
n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))])
attrs = {} # class attributes
attrs["_cosmology_class"] = cosmo_cls
attrs["_method_name"] = method
attrs["n_inputs"] = n_inputs
attrs["n_outputs"] = 1
params = {} # Parameters (also class attributes)
for n in cosmology.__parameters__:
v = getattr(cosmology, n) # parameter value
if v is None: # skip unspecified parameters
continue
# add as Model Parameter
params[n] = convert_parameter_to_model_parameter(
getattr(cosmo_cls, n), v, cosmology.meta.get(n)
)
# class name is cosmology name + Cosmology + method name + Model
clsname = (
cosmo_cls.__qualname__.replace(".", "_")
+ "Cosmology"
+ method.replace("_", " ").title().replace(" ", "")
+ "Model"
)
# make Model class
CosmoModel = type(clsname, (_CosmologyModel,), {**attrs, **params})
# override __signature__ and format the doc.
setattr(CosmoModel.evaluate, "__signature__", msig)
CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format(
cosmo_cls=cosmo_cls.__qualname__, method=method
)
# instantiate class using default values
ps = {n: getattr(cosmology, n) for n in params.keys()}
model = CosmoModel(**ps, name=cosmology.name, meta=copy.deepcopy(cosmology.meta))
return model
def model_identify(origin, format, *args, **kwargs):
"""Identify if object uses the :class:`~astropy.modeling.Model` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Model) and (format in (None, "astropy.model"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.model", Cosmology, from_model)
convert_registry.register_writer("astropy.model", Cosmology, to_model)
convert_registry.register_identifier("astropy.model", Cosmology, model_identify)
|
3a9b778fe0e9758afdbc84f2b6c67b440dc81d508f2eedc50a9938ef164901eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections import defaultdict
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable, Row
from .mapping import from_mapping
def from_row(row, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`.
Parameters
----------
row : `~astropy.table.Row`
The object containing the Cosmology information.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
        Cosmology does NOT have a variable keyword argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Row with
``from_row``, we will first make a `~astropy.table.Row` using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this row can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cr, format="astropy.row")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
# special values
name = row["name"] if "name" in row.columns else None # get name from column
meta = defaultdict(dict, copy.deepcopy(row.meta))
# Now need to add the Columnar metadata. This is only available on the
# parent table. If Row is ever separated from Table, this should be moved
# to ``to_table``.
for col in row._table.itercols():
if col.info.meta: # Only add metadata if not empty
meta[col.name].update(col.info.meta)
# turn row into mapping, filling cosmo if not in a column
mapping = dict(row)
mapping["name"] = name
mapping.setdefault("cosmology", meta.pop("cosmology", None))
mapping["meta"] = dict(meta)
# build cosmology from map
return from_mapping(mapping, move_to_meta=move_to_meta, cosmology=cosmology)
def to_row(cosmology, *args, cosmology_in_meta=False, table_cls=QTable):
"""Serialize the cosmology into a `~astropy.table.Row`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
table_cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to use.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`) or
as the first column (if `False`, default).
Returns
-------
`~astropy.table.Row`
With columns for the cosmology parameters, and metadata in the Table's
``meta`` attribute. The cosmology class name will either be a column
or in ``meta``, depending on 'cosmology_in_meta'.
Examples
--------
A Cosmology as a `~astropy.table.Row` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
"""
from .table import to_table
table = to_table(cosmology, cls=table_cls, cosmology_in_meta=cosmology_in_meta)
return table[0] # extract row from table
def row_identify(origin, format, *args, **kwargs):
"""Identify if object uses the `~astropy.table.Row` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Row) and (format in (None, "astropy.row"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.row", Cosmology, from_row)
convert_registry.register_writer("astropy.row", Cosmology, to_row)
convert_registry.register_identifier("astropy.row", Cosmology, row_identify)
|
66a763ad6a24cfd44aa48547e023dac54ce01efba89e80b9b6f81d96f8f87c53 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
"""
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
__all__ = [] # nothing is publicly scoped
def from_cosmology(cosmo, /, **kwargs):
"""Return the |Cosmology| unchanged.
Parameters
----------
cosmo : `~astropy.cosmology.Cosmology`
The cosmology to return.
**kwargs
This argument is required for compatibility with the standard set of
        keyword arguments of `~astropy.cosmology.Cosmology.from_format`,
e.g. "cosmology". If "cosmology" is included and is not `None`,
``cosmo`` is checked for correctness.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Just ``cosmo`` passed through.
Raises
------
TypeError
        If ``cosmo`` is not an instance of the class given by ``cosmology``
        (when ``cosmology`` is not `None`).
"""
# Check argument `cosmology`
cosmology = kwargs.get("cosmology")
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
if cosmology is not None and not isinstance(cosmo, cosmology):
raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")
return cosmo
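# Usage sketch (illustrative): ``Cosmology.from_format(some_cosmo)`` routes
# through this function and returns ``some_cosmo`` unchanged, while
# ``Cosmology.from_format(some_cosmo, cosmology="FlatLambdaCDM")`` additionally
# validates the instance's class (``some_cosmo`` is a placeholder name).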
def to_cosmology(cosmo, *args):
"""Return the |Cosmology| unchanged.
Parameters
----------
cosmo : `~astropy.cosmology.Cosmology`
The cosmology to return.
*args
Not used.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Just ``cosmo`` passed through.
"""
return cosmo
def cosmology_identify(origin, format, *args, **kwargs):
"""Identify if object is a `~astropy.cosmology.Cosmology`.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Cosmology) and (
format in (None, "astropy.cosmology")
)
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.cosmology", Cosmology, from_cosmology)
convert_registry.register_writer("astropy.cosmology", Cosmology, to_cosmology)
convert_registry.register_identifier("astropy.cosmology", Cosmology, cosmology_identify)
|
fb1bd98490c6619e59b3ff736b43544f7d6689b2a82431e24c31654e959ef43c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import Column, QTable, Table
from .mapping import to_mapping
from .row import from_row
from .utils import convert_parameter_to_column
def from_table(table, index=None, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|.
Parameters
----------
table : `~astropy.table.Table`
The object to parse into a |Cosmology|.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Table with
``from_table``, we will first make a |QTable| using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this table can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(ct, format="astropy.table")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del ct["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(ct)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
For tables with multiple rows of cosmological parameters, the ``index``
argument is needed to select the correct row. The index can be an integer
for the row number or, if the table is indexed by a column, the value of
that column. If the table is not indexed and ``index`` is a string, the
"name" column is used as the indexing column.
Here is an example where ``index`` is needed and can be either an integer
(for the row number) or the name of one of the cosmologies, e.g. 'Planck15'.
>>> from astropy.cosmology import Planck13, Planck15, Planck18
>>> from astropy.table import vstack
>>> cts = vstack([c.to_format("astropy.table")
... for c in (Planck13, Planck15, Planck18)],
... metadata_conflicts='silent')
>>> cts
<QTable length=3>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- --------
Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252
Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
>>> cosmo = Cosmology.from_format(cts, index=1, format="astropy.table")
>>> cosmo == Planck15
True
For further examples, see :doc:`astropy:cosmology/io`.
"""
# Get row from table
# string index uses the indexed column on the table to find the row index.
if isinstance(index, str):
if not table.indices: # no indexing column, find by string match
indices = np.where(table["name"] == index)[0]
else: # has indexing column
indices = table.loc_indices[index] # need to convert to row index (int)
if isinstance(indices, (int, np.integer)): # loc_indices
index = indices
elif len(indices) == 1: # only happens w/ np.where
index = indices[0]
elif len(indices) == 0:  # no matches found
raise KeyError(f"No matches found for key {indices}")
else: # like the Highlander, there can be only 1 Cosmology
raise ValueError(f"more than one cosmology found for key {indices}")
# no index is needed for a 1-row table. For a multi-row table...
if index is None:
if len(table) != 1: # multi-row table and no index
raise ValueError(
"need to select a specific row (e.g. index=1) when "
"constructing a Cosmology from a multi-row table."
)
else: # single-row table
index = 0
row = table[index] # index is now the row index (int)
# parse row to cosmo
return from_row(row, move_to_meta=move_to_meta, cosmology=cosmology)
def to_table(cosmology, *args, cls=QTable, cosmology_in_meta=True):
"""Serialize the cosmology into a `~astropy.table.QTable`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to return.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
Returns
-------
`~astropy.table.QTable`
With columns for the cosmology parameters, and metadata and
cosmology class name in the Table's ``meta`` attribute
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
Examples
--------
A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
>>> ct.meta
OrderedDict([..., ('cosmology', 'FlatLambdaCDM')])
To move the cosmology class from the metadata to a Table row, set the
``cosmology_in_meta`` argument to `False`:
>>> Planck18.to_format("astropy.table", cosmology_in_meta=False)
<QTable length=1>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Astropy recommends `~astropy.table.QTable` for tables with
`~astropy.units.Quantity` columns. However, the returned type may be
overridden using the ``cls`` argument:
>>> from astropy.table import Table
>>> Planck18.to_format("astropy.table", cls=Table)
<Table length=1>
...
"""
if not issubclass(cls, Table):
raise TypeError(f"'cls' must be a (sub)class of Table, not {type(cls)}")
# Start by getting a map representation.
data = to_mapping(cosmology)
data["cosmology"] = data["cosmology"].__qualname__ # change to str
# Metadata
meta = data.pop("meta") # remove the meta
if cosmology_in_meta:
meta["cosmology"] = data.pop("cosmology")
# Need to turn everything into something Table can process:
# - Column for Parameter
# - list for anything else
cosmo_cls = cosmology.__class__
for k, v in data.items():
if k in cosmology.__parameters__:
col = convert_parameter_to_column(
getattr(cosmo_cls, k), v, cosmology.meta.get(k)
)
else:
col = Column([v])
data[k] = col
tbl = cls(data, meta=meta)
tbl.add_index("name", unique=True)
return tbl
def table_identify(origin, format, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Table) and (format in (None, "astropy.table"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.table", Cosmology, from_table)
convert_registry.register_writer("astropy.table", Cosmology, to_table)
convert_registry.register_identifier("astropy.table", Cosmology, table_identify)
|
376017135fed6af3e926e81fa3d00b53ea49c36827abc037fff3a00613099a05 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Read/Write/Interchange methods for `astropy.cosmology`. **NOT public API**.
"""
# Import to register with the I/O machinery
from . import cosmology, ecsv, html, mapping, model, row, table, yaml # noqa: F401
|
b3905adfeab0c5c999e8b02a45ab6e41bed2ee144127694672215f1787a7d6ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.io.misc.yaml import AstropyDumper, AstropyLoader, dump, load
from .mapping import from_mapping
from .utils import FULLQUALNAME_SUBSTITUTIONS as QNS
__all__ = [] # nothing is publicly scoped
##############################################################################
# Serializer Functions
# These do Cosmology <-> YAML through a modified dictionary representation of
# the Cosmology object. The Unified-I/O functions below are thin wrappers
# around the YAML machinery that calls these functions.
def yaml_representer(tag):
""":mod:`yaml` representation of |Cosmology| object.
Parameters
----------
tag : str
The class tag, e.g. '!astropy.cosmology.LambdaCDM'
Returns
-------
representer : callable[[`~astropy.io.misc.yaml.AstropyDumper`, |Cosmology|], str]
Function to construct :mod:`yaml` representation of |Cosmology| object.
"""
def representer(dumper, obj):
"""Cosmology yaml representer function for {}.
Parameters
----------
dumper : `~astropy.io.misc.yaml.AstropyDumper`
obj : `~astropy.cosmology.Cosmology`
Returns
-------
str
:mod:`yaml` representation of |Cosmology| object.
"""
# convert to mapping
map = obj.to_format("mapping")
# remove the cosmology class info. It's already recorded in `tag`
map.pop("cosmology")
# make the metadata serializable in an order-preserving way.
map["meta"] = tuple(map["meta"].items())
return dumper.represent_mapping(tag, map)
representer.__doc__ = representer.__doc__.format(tag)
return representer
def yaml_constructor(cls):
"""Cosmology| object from :mod:`yaml` representation.
Parameters
----------
cls : type
The class type, e.g. `~astropy.cosmology.LambdaCDM`.
Returns
-------
constructor : callable
Function to construct |Cosmology| object from :mod:`yaml` representation.
"""
def constructor(loader, node):
"""Cosmology yaml constructor function.
Parameters
----------
loader : `~astropy.io.misc.yaml.AstropyLoader`
node : `yaml.nodes.MappingNode`
yaml representation of |Cosmology| object.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
"""
# create mapping from YAML node
map = loader.construct_mapping(node)
# restore metadata to dict
map["meta"] = dict(map["meta"])
# get cosmology class qualified name from node
cosmology = str(node.tag).split(".")[-1]
# create Cosmology from mapping
return from_mapping(map, move_to_meta=False, cosmology=cosmology)
return constructor
def register_cosmology_yaml(cosmo_cls):
"""Register :mod:`yaml` for Cosmology class.
Parameters
----------
cosmo_cls : `~astropy.cosmology.Cosmology` class
"""
fqn = f"{cosmo_cls.__module__}.{cosmo_cls.__qualname__}"
tag = "!" + QNS.get(
fqn, fqn
) # Possibly sub fully qualified name for a preferred path
AstropyDumper.add_representer(cosmo_cls, yaml_representer(tag))
AstropyLoader.add_constructor(tag, yaml_constructor(cosmo_cls))
##############################################################################
# Unified-I/O Functions
def from_yaml(yml, *, cosmology=None):
"""Load `~astropy.cosmology.Cosmology` from :mod:`yaml` object.
Parameters
----------
yml : str
:mod:`yaml` representation of |Cosmology| object
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The expected cosmology class (or string name thereof). This argument
is only checked for correctness if not `None`.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Raises
------
TypeError
If the |Cosmology| object loaded from ``yml`` is not an instance of
the ``cosmology`` (and ``cosmology`` is not `None`).
"""
with u.add_enabled_units(cu):
cosmo = load(yml)
# Check argument `cosmology`, if not None
# This kwarg is required for compatibility with |Cosmology.from_format|
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
if cosmology is not None and not isinstance(cosmo, cosmology):
raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")
return cosmo
def to_yaml(cosmology, *args):
"""Return the cosmology class, parameters, and metadata as a :mod:`yaml` object.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
Returns
-------
str
:mod:`yaml` representation of |Cosmology| object
"""
return dump(cosmology)
# ``read`` cannot handle non-path strings.
# TODO! this says there should be different types of I/O registries.
# not just hacking object conversion on top of file I/O.
# def yaml_identify(origin, format, *args, **kwargs):
# """Identify if object uses the yaml format.
#
# Returns
# -------
# bool
# """
# itis = False
# if origin == "read":
# itis = isinstance(args[1], str) and args[1][0].startswith("!")
# itis &= format in (None, "yaml")
#
# return itis
# ===================================================================
# Register
for cosmo_cls in _COSMOLOGY_CLASSES.values():
register_cosmology_yaml(cosmo_cls)
convert_registry.register_reader("yaml", Cosmology, from_yaml)
convert_registry.register_writer("yaml", Cosmology, to_yaml)
# convert_registry.register_identifier("yaml", Cosmology, yaml_identify)
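# Illustrative usage (a hedged sketch, not part of the module): since the yaml
# identifier above is disabled, ``format="yaml"`` must be given explicitly when
# parsing.
#
#     >>> from astropy.cosmology import Cosmology, Planck18
#     >>> yml = Planck18.to_format("yaml")  # tagged '!astropy.cosmology...'
#     >>> Cosmology.from_format(yml, format="yaml") == Planck18
#     True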
|
7c46f6afe085cfce39bfcf0d8c1dfc9ed5f783e6bd6595d6393c8ecb1c7f1baa | import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.cosmology.parameter import Parameter
from astropy.table import QTable
from .table import from_table, to_table
# Format look-up for conversion, {original_name: new_name}
# TODO! move this information into the Parameters themselves
_FORMAT_TABLE = {
"H0": "$$H_{0}$$",
"Om0": "$$\\Omega_{m,0}$$",
"Ode0": "$$\\Omega_{\\Lambda,0}$$",
"Tcmb0": "$$T_{0}$$",
"Neff": "$$N_{eff}$$",
"m_nu": "$$m_{nu}$$",
"Ob0": "$$\\Omega_{b,0}$$",
"w0": "$$w_{0}$$",
"wa": "$$w_{a}$$",
"wz": "$$w_{z}$$",
"wp": "$$w_{p}$$",
"zp": "$$z_{p}$$",
}
def read_html_table(
filename,
index=None,
*,
move_to_meta=False,
cosmology=None,
latex_names=True,
**kwargs,
):
"""Read a |Cosmology| from an HTML file.
Parameters
----------
filename : path-like or file-like
From where to read the Cosmology.
index : int or str or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index`` is a
string, the "name" column is used as the indexing column.
move_to_meta : bool, optional keyword-only
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict (e.g. for
``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be
``{'key': 10}``).
cosmology : str or |Cosmology| class or None, optional keyword-only
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter
values, filling in any non-mandatory arguments missing in 'table'.
latex_names : bool, optional keyword-only
Whether the |Table| (might) have latex column names for the parameters
that need to be mapped to the correct parameter name -- e.g. $$H_{0}$$
to 'H0'. This is `True` by default, but can be turned off (set to
`False`) if there is a known name conflict (e.g. both an 'H0' and
'$$H_{0}$$' column) as this will raise an error. In this case, the
correct name ('H0') is preferred.
**kwargs : Any
Passed to :attr:`astropy.table.QTable.read`. ``format`` is set to
'ascii.html', regardless of input.
Returns
-------
|Cosmology| subclass instance
Raises
------
ValueError
If the keyword argument 'format' is given and is not "ascii.html".
"""
# Check that the format is 'ascii.html' (or not specified)
format = kwargs.pop("format", "ascii.html")
if format != "ascii.html":
raise ValueError(f"format must be 'ascii.html', not {format}")
# Reading is handled by `QTable`.
with u.add_enabled_units(cu): # (cosmology units not turned on by default)
table = QTable.read(filename, format="ascii.html", **kwargs)
# Need to map the table's column names to Cosmology inputs (parameter
# names).
# TODO! move the `latex_names` into `from_table`
if latex_names:
table_columns = set(table.colnames)
for name, latex in _FORMAT_TABLE.items():
if latex in table_columns:
table.rename_column(latex, name)
# Build the cosmology from table, using the private backend.
return from_table(
table, index=index, move_to_meta=move_to_meta, cosmology=cosmology
)
def write_html_table(
cosmology, file, *, overwrite=False, cls=QTable, latex_names=False, **kwargs
):
r"""Serialize the |Cosmology| into a HTML table.
Parameters
----------
cosmology : |Cosmology| subclass instance
file : path-like or file-like
Where to write the HTML table.
overwrite : bool, optional keyword-only
Whether to overwrite the file, if it exists.
cls : |Table| class, optional keyword-only
Astropy |Table| (sub)class to use when writing. Default is |QTable|
class.
latex_names : bool, optional keyword-only
Whether to format the parameters (column) names to latex -- e.g. 'H0' to
$$H_{0}$$.
**kwargs : Any
Passed to ``cls.write``.
Raises
------
TypeError
If the optional keyword-argument 'cls' is not a subclass of |Table|.
ValueError
If the keyword argument 'format' is given and is not "ascii.html".
Notes
-----
An HTML file containing a Cosmology HTML table should have scripts enabling
MathJax.
::
<script
src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<script type="text/javascript" id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
</script>
"""
# Check that the format is 'ascii.html' (or not specified)
format = kwargs.pop("format", "ascii.html")
if format != "ascii.html":
raise ValueError(f"format must be 'ascii.html', not {format}")
# Set cosmology_in_meta to False for now, since no metadata is being kept
table = to_table(cosmology, cls=cls, cosmology_in_meta=False)
cosmo_cls = type(cosmology)
for name, col in table.columns.items():
param = getattr(cosmo_cls, name, None)
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Replace column with unitless version
table.replace_column(name, (col << param.unit).value, copy=False)
# TODO! move the `latex_names` into `to_table`
if latex_names:
new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.__parameters__]
table.rename_columns(cosmology.__parameters__, new_names)
# Write HTML, using table I/O
table.write(file, overwrite=overwrite, format="ascii.html", **kwargs)
def html_identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify if an object uses the HTML Table format.
Parameters
----------
origin : Any
Not used.
filepath : str or Any
From where to read the Cosmology.
fileobj : Any
Not used.
*args : Any
Not used.
**kwargs : Any
Not used.
Returns
-------
bool
If the filepath is a string ending with '.html'.
"""
return isinstance(filepath, str) and filepath.endswith(".html")
# ===================================================================
# Register
readwrite_registry.register_reader("ascii.html", Cosmology, read_html_table)
readwrite_registry.register_writer("ascii.html", Cosmology, write_html_table)
readwrite_registry.register_identifier("ascii.html", Cosmology, html_identify)
|
22ebd57a0010c0ea674ef94f1e09a7000a459b0cfee5ed13a39fc7a0e46c3d69 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import copy
from collections.abc import Mapping
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
__all__ = [] # nothing is publicly scoped
def from_mapping(map, *, move_to_meta=False, cosmology=None):
"""Load `~astropy.cosmology.Cosmology` from mapping object.
Parameters
----------
map : mapping
Arguments into the class -- like "name" or "meta".
If 'cosmology' is None, must have field "cosmology" which can be either
the string name of the cosmology class (e.g. "FlatLambdaCDM") or the
class itself.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'map'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a dictionary with
``from_mapping``, we will first make a mapping using
:meth:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cm = Planck18.to_format('mapping')
>>> cm
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966,
'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046,
'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897,
'meta': ...
Now this dict can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cm, format="mapping")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del cm["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(cm)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
"""
params = dict(map) # so we are guaranteed to have a poppable map
# get cosmology
# 1st from argument. Allows for override of the cosmology, if on file.
# 2nd from params. This MUST have the cosmology if 'kwargs' did not.
if cosmology is None:
cosmology = params.pop("cosmology")
else:
params.pop("cosmology", None) # pop, but don't use
# if string, parse to class
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
# select arguments from mapping that are in the cosmo's signature.
ba = cosmology._init_signature.bind_partial() # blank set of args
ba.apply_defaults() # fill in the defaults
for k in cosmology._init_signature.parameters.keys():
if k in params: # transfer argument, if in params
ba.arguments[k] = params.pop(k)
# deal with remaining params. If there is a **kwargs use that, else
# allow to transfer to metadata. Raise TypeError if can't.
lastp = tuple(cosmology._init_signature.parameters.values())[-1]
if lastp.kind == 4:  # 4 == inspect.Parameter.VAR_KEYWORD, i.e. **kwargs
ba.arguments[lastp.name] = params
elif move_to_meta: # prefers current meta, which was explicitly set
meta = ba.arguments["meta"] or {} # (None -> dict)
ba.arguments["meta"] = {**params, **meta}
elif params:
raise TypeError(f"there are unused parameters {params}.")
# else: pass # no kwargs, no move-to-meta, and all the params are used
return cosmology(*ba.args, **ba.kwargs)
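# Illustrative usage of ``move_to_meta`` (a hedged sketch; the "extra" key is
# hypothetical):
#
#     >>> from astropy.cosmology import Cosmology, Planck18
#     >>> cm = Planck18.to_format("mapping")
#     >>> cm["extra"] = 42  # not in the FlatLambdaCDM signature
#     >>> cosmo = Cosmology.from_format(cm, move_to_meta=True)
#     >>> cosmo.meta["extra"]
#     42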
def to_mapping(
cosmology, *args, cls=dict, cosmology_as_str=False, move_from_meta=False
):
"""Return the cosmology class, parameters, and metadata as a `dict`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
`dict` or `collections.abc.Mapping` subclass.
The mapping type to return. Default is `dict`.
cosmology_as_str : bool (optional, keyword-only)
Whether the cosmology value is the class (if `False`, default) or
the semi-qualified name (if `True`).
move_from_meta : bool (optional, keyword-only)
Whether to add the Cosmology's metadata as an item to the mapping (if
`False`, default) or to merge with the rest of the mapping, preferring
the original values (if `True`).
Returns
-------
dict
with key-values for the cosmology parameters and also:
- 'cosmology' : the class
- 'meta' : the contents of the cosmology's metadata attribute.
If ``move_from_meta`` is `True`, this key is missing and the
contained metadata are added to the main `dict`.
Examples
--------
A Cosmology as a mapping will have the cosmology's name and
parameters as items, and the metadata as a nested dictionary.
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format('mapping')
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966,
'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046,
'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897,
'meta': ...
The dictionary type may be changed with the ``cls`` keyword argument:
>>> from collections import OrderedDict
>>> Planck18.to_format('mapping', cls=OrderedDict)
OrderedDict([('cosmology', <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>),
('name', 'Planck18'), ('H0', <Quantity 67.66 km / (Mpc s)>),
('Om0', 0.30966), ('Tcmb0', <Quantity 2.7255 K>), ('Neff', 3.046),
('m_nu', <Quantity [0. , 0. , 0.06] eV>), ('Ob0', 0.04897),
('meta', ...
Sometimes it is more useful to have the name of the cosmology class, not
the object itself. The keyword argument ``cosmology_as_str`` may be used:
>>> Planck18.to_format('mapping', cosmology_as_str=True)
{'cosmology': 'FlatLambdaCDM', ...
The metadata is normally included as a nested mapping. To move the metadata
into the main mapping, use the keyword argument ``move_from_meta``. This
kwarg inverts ``move_to_meta`` in
``Cosmology.to_format("mapping", move_to_meta=...)`` where extra items
are moved to the metadata (if the cosmology constructor does not have a
variable keyword-only argument -- ``**kwargs``).
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format('mapping', move_from_meta=True)
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'Oc0': 0.2607, 'n': 0.9665, 'sigma8': 0.8102, ...
"""
if not issubclass(cls, (dict, Mapping)):
raise TypeError(f"'cls' must be a (sub)class of dict or Mapping, not {cls}")
m = cls()
# start with the cosmology class & name
m["cosmology"] = (
cosmology.__class__.__qualname__ if cosmology_as_str else cosmology.__class__
)
m["name"] = cosmology.name # here only for dict ordering
meta = copy.deepcopy(cosmology.meta) # metadata (mutable)
if move_from_meta:
# Merge the mutable metadata. Since params are added later they will
# be preferred in cases of overlapping keys. Likewise, need to pop
# cosmology and name from meta.
meta.pop("cosmology", None)
meta.pop("name", None)
m.update(meta)
# Add all the immutable inputs
m.update(
{
k: v
for k, v in cosmology._init_arguments.items()
if k not in ("meta", "name")
}
)
# Lastly, add the metadata, if haven't already (above)
if not move_from_meta:
m["meta"] = meta # TODO? should meta be type(cls)
return m
def mapping_identify(origin, format, *args, **kwargs):
"""Identify if object uses the mapping format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Mapping) and (format in (None, "mapping"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("mapping", Cosmology, from_mapping)
convert_registry.register_writer("mapping", Cosmology, to_mapping)
convert_registry.register_identifier("mapping", Cosmology, mapping_identify)
|
3b62e9cfb3a7360af99ad77f64c7e1330933447353ac7230fc37adc43967c06c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.modeling import Parameter as ModelParameter
from astropy.table import Column
FULLQUALNAME_SUBSTITUTIONS = {
"astropy.cosmology.flrw.base.FLRW": "astropy.cosmology.flrw.FLRW",
"astropy.cosmology.flrw.lambdacdm.LambdaCDM": "astropy.cosmology.flrw.LambdaCDM",
"astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM": (
"astropy.cosmology.flrw.FlatLambdaCDM"
),
"astropy.cosmology.flrw.w0wacdm.w0waCDM": "astropy.cosmology.flrw.w0waCDM",
"astropy.cosmology.flrw.w0wacdm.Flatw0waCDM": "astropy.cosmology.flrw.Flatw0waCDM",
"astropy.cosmology.flrw.w0wzcdm.w0wzCDM": "astropy.cosmology.flrw.w0wzCDM",
"astropy.cosmology.flrw.w0cdm.wCDM": "astropy.cosmology.flrw.wCDM",
"astropy.cosmology.flrw.w0cdm.FlatwCDM": "astropy.cosmology.flrw.FlatwCDM",
"astropy.cosmology.flrw.wpwazpcdm.wpwaCDM": "astropy.cosmology.flrw.wpwaCDM",
}
"""Substitutions mapping the actual qualified name to its preferred value."""
def convert_parameter_to_column(parameter, value, meta=None):
"""Convert a |Cosmology| Parameter to a Table |Column|.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
Returns
-------
`astropy.table.Column`
"""
shape = (1,) + np.shape(value) # minimum of 1d
col = Column(
data=np.reshape(value, shape),
name=parameter.name,
dtype=None, # inferred from the data
description=parameter.__doc__,
format=None,
meta=meta,
)
return col
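# Illustrative usage (a hedged sketch, mirroring the call in table.py's
# ``to_table``; class-level attribute access returns the Parameter itself):
#
#     >>> from astropy.cosmology import Planck18
#     >>> col = convert_parameter_to_column(type(Planck18).H0, Planck18.H0)
#     >>> col.name
#     'H0'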
def convert_parameter_to_model_parameter(parameter, value, meta=None):
"""Convert a Cosmology Parameter to a Model Parameter.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
This function will use any of: 'getter', 'setter', 'fixed', 'tied',
'min', 'max', 'bounds', 'prior', 'posterior'.
Returns
-------
`astropy.modeling.Parameter`
"""
# Get the information relevant to the Model from the metadata
attrs = (
"getter",
"setter",
"fixed",
"tied",
"min",
"max",
"bounds",
"prior",
"posterior",
)
extra = {k: v for k, v in (meta or {}).items() if k in attrs}
return ModelParameter(
description=parameter.__doc__,
default=value,
unit=getattr(value, "unit", None),
**extra
)
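# Illustrative usage (a hedged sketch; the ``meta`` value is hypothetical):
#
#     >>> from astropy.cosmology import Planck18
#     >>> mp = convert_parameter_to_model_parameter(
#     ...     type(Planck18).H0, Planck18.H0, meta={"fixed": True})
#     >>> mp.fixed
#     True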
|
d0156126f15fb8fd21d08ad92751b5e5569c60a2b0cc05f5022edaaf86da41ce | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable
from .table import from_table, to_table
def read_ecsv(filename, index=None, *, move_to_meta=False, cosmology=None, **kwargs):
"""Read a `~astropy.cosmology.Cosmology` from an ECSV file.
Parameters
----------
filename : path-like or file-like
From where to read the Cosmology.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
**kwargs
Passed to :attr:`astropy.table.QTable.read`
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
"""
kwargs["format"] = "ascii.ecsv"
with u.add_enabled_units(cu):
table = QTable.read(filename, **kwargs)
# build cosmology from table
return from_table(
table, index=index, move_to_meta=move_to_meta, cosmology=cosmology
)
def write_ecsv(
cosmology, file, *, overwrite=False, cls=QTable, cosmology_in_meta=True, **kwargs
):
"""Serialize the cosmology into a ECSV.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
Location to save the serialized cosmology.
overwrite : bool
Whether to overwrite the file, if it exists.
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` (sub)class to use when writing.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
**kwargs
Passed to ``cls.write``
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
"""
table = to_table(cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta)
kwargs["format"] = "ascii.ecsv"
table.write(file, overwrite=overwrite, **kwargs)
def ecsv_identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
return filepath is not None and filepath.endswith(".ecsv")
# ===================================================================
# Register
readwrite_registry.register_reader("ascii.ecsv", Cosmology, read_ecsv)
readwrite_registry.register_writer("ascii.ecsv", Cosmology, write_ecsv)
readwrite_registry.register_identifier("ascii.ecsv", Cosmology, ecsv_identify)
|
d01c27897ae7fd956c3463b457ec4797846e20e929e0ca796f59863bc5df3cb7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import inf
import numpy as np
import pytest
from astropy.cosmology.utils import (
aszarr,
inf_like,
vectorize_if_needed,
vectorize_redshift_method,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
from .test_core import _zarr, invalid_zs, valid_zs
def test_vectorize_redshift_method():
"""Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
class Class:
@vectorize_redshift_method
def method(self, z):
return z
c = Class()
assert hasattr(c.method, "__vectorized__")
assert isinstance(c.method.__vectorized__, np.vectorize)
# calling with Number
assert c.method(1) == 1
assert isinstance(c.method(1), int)
# calling with a numpy scalar
assert c.method(np.float64(1)) == np.float64(1)
assert isinstance(c.method(np.float64(1)), np.float64)
# numpy array
assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
assert isinstance(c.method(np.array([1, 2])), np.ndarray)
# non-scalar
assert all(c.method([1, 2]) == np.array([1, 2]))
assert isinstance(c.method([1, 2]), np.ndarray)
def test_vectorize_if_needed():
"""
Test :func:`astropy.cosmology.utils.vectorize_if_needed`.
There's no need to test 'veckw' because that is directly passed to
`numpy.vectorize`, which thoroughly tests the various inputs.
"""
def func(x):
return x**2
with pytest.warns(AstropyDeprecationWarning):
# not vectorized
assert vectorize_if_needed(func, 2) == 4
# vectorized
assert all(vectorize_if_needed(func, [2, 3]) == [4, 9])
@pytest.mark.parametrize(
"arr, expected",
[
(0.0, inf), # float scalar
(1, inf), # integer scalar should give float output
([0.0, 1.0, 2.0, 3.0], (inf, inf, inf, inf)),
([0, 1, 2, 3], (inf, inf, inf, inf)), # integer list
],
)
def test_inf_like(arr, expected):
"""
Test :func:`astropy.cosmology.utils.inf_like`.
All inputs should give a float output.
These tests are also in the docstring, but it's better to have them also
in one consolidated location.
"""
with pytest.warns(AstropyDeprecationWarning):
assert np.all(inf_like(arr) == expected)
# -------------------------------------------------------------------
class Test_aszarr:
@pytest.mark.parametrize(
"z, expect",
list(
zip(
valid_zs,
[0, 1, 1100, np.float64(3300), 2.0, 3.0, _zarr, _zarr, _zarr, _zarr],
)
),
)
def test_valid(self, z, expect):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
got = aszarr(z)
assert np.array_equal(got, expect)
@pytest.mark.parametrize("z, exc", invalid_zs)
def test_invalid(self, z, exc):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
with pytest.raises(exc):
aszarr(z)
|
c909205b362821a556fe8d9c68539d0100a8fb94cfd49835b9f6217dd96c8017 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Configure the tests for :mod:`astropy.cosmology`."""
from astropy.cosmology.tests.helper import clean_registry # noqa: F401
from astropy.tests.helper import pickle_protocol # noqa: F401
|
6118cbb0b44399a8a9624a48a00044d2f99f7f1c4463c8774d141b5b64cc927d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.parameter`."""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import (
Parameter,
_validate_to_float,
_validate_with_unit,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
class ParameterTestMixin:
"""Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology.
:class:`astropy.cosmology.Parameter` is a descriptor and this test suite
tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed
into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`).
"""
@pytest.fixture
def parameter(self, cosmo_cls):
"""Cosmological Parameters"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop())
@pytest.fixture
def all_parameter(self, cosmo_cls):
"""Cosmological All Parameter instances"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop())
# ===============================================================
# Method Tests
def test_Parameter_class_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes on class."""
# _registry_validators
assert hasattr(all_parameter, "_registry_validators")
assert isinstance(all_parameter._registry_validators, dict)
assert all(
isinstance(k, str) for k in all_parameter._registry_validators.keys()
)
assert all(callable(v) for v in all_parameter._registry_validators.values())
def test_Parameter_init(self):
"""Test :class:`astropy.cosmology.Parameter` instantiation."""
# defaults
parameter = Parameter()
assert parameter.fvalidate is _validate_with_unit
assert parameter.unit is None
assert parameter.equivalencies == []
assert parameter.derived is False
assert parameter.name is None
# setting all kwargs
parameter = Parameter(
fvalidate="float",
doc="DOCSTRING",
unit="km",
equivalencies=[u.mass_energy()],
derived=True,
)
assert parameter.fvalidate is _validate_to_float
assert parameter.unit is u.km
assert parameter.equivalencies == [u.mass_energy()]
assert parameter.derived is True
def test_Parameter_init_deprecated_fmt(self):
"""Test that passing the argument ``fmt`` is deprecated."""
with pytest.warns(AstropyDeprecationWarning):
parameter = Parameter(fmt=".4f")
assert parameter._format_spec == ".4f"
# Test that it appears in initializing arguments
init_args = parameter._get_init_arguments()
assert init_args["fmt"] == ".4f"
def test_Parameter_instance_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
assert hasattr(all_parameter, "__doc__")
# Parameter
assert hasattr(all_parameter, "_unit")
assert hasattr(all_parameter, "_equivalencies")
assert hasattr(all_parameter, "_derived")
assert hasattr(all_parameter, "_format_spec")
# __set_name__
assert hasattr(all_parameter, "_attr_name")
assert hasattr(all_parameter, "_attr_name_private")
def test_Parameter_fvalidate(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
def test_Parameter_name(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
assert hasattr(all_parameter, "name")
assert isinstance(all_parameter.name, str)
assert all_parameter.name is all_parameter._attr_name
def test_Parameter_unit(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
assert hasattr(all_parameter, "unit")
assert isinstance(all_parameter.unit, (u.UnitBase, type(None)))
assert all_parameter.unit is all_parameter._unit
def test_Parameter_equivalencies(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
assert hasattr(all_parameter, "equivalencies")
assert isinstance(all_parameter.equivalencies, (list, u.Equivalency))
assert all_parameter.equivalencies is all_parameter._equivalencies
def test_Parameter_format_spec(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
with pytest.warns(AstropyDeprecationWarning):
fmt = all_parameter.format_spec
assert isinstance(fmt, str)
assert fmt is all_parameter._format_spec
def test_Parameter_derived(self, cosmo_cls, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
assert hasattr(all_parameter, "derived")
assert isinstance(all_parameter.derived, bool)
assert all_parameter.derived is all_parameter._derived
# test value
if all_parameter.name in cosmo_cls.__parameters__:
assert all_parameter.derived is False
else:
assert all_parameter.derived is True
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__get__`."""
# from class
parameter = getattr(cosmo_cls, all_parameter.name)
assert isinstance(parameter, Parameter)
assert parameter is all_parameter
# from instance
parameter = getattr(cosmo, all_parameter.name)
assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private))
def test_Parameter_descriptor_set(self, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__set__`."""
# test it's already set
assert hasattr(cosmo, all_parameter._attr_name_private)
# and raises an error if set again
with pytest.raises(AttributeError, match="can't set attribute"):
setattr(cosmo, all_parameter._attr_name, None)
# -------------------------------------------
# validate value
# tested later.
# ===============================================================
# Usage Tests
def test_Parameter_listed(self, cosmo_cls, all_parameter):
"""Test each `astropy.cosmology.Parameter` attached to Cosmology."""
# just double check that each entry is a Parameter
assert isinstance(all_parameter, Parameter)
# the reverse: check that if it is a Parameter, it's listed.
# note have to check the more inclusive ``__all_parameters__``
assert all_parameter.name in cosmo_cls.__all_parameters__
if not all_parameter.derived:
assert all_parameter.name in cosmo_cls.__parameters__
def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
"""Test `astropy.cosmology.Parameter`-related on Cosmology."""
# establish has expected attribute
assert hasattr(cosmo_cls, "__parameters__")
assert hasattr(cosmo_cls, "__all_parameters__")
def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
"""Cosmology Parameter not unique to class when subclass defined."""
# define subclass to show param is same
class ExampleBase(cosmo_cls):
param = Parameter()
class Example(ExampleBase):
pass
assert Example.param is ExampleBase.param
assert Example.__parameters__ == ExampleBase.__parameters__
def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
"""Test parameters are reordered."""
class Example(cosmo_cls):
param = Parameter()
def __init__(self, param, *, name=None, meta=None):
pass # never actually initialized
# param should be 1st, all other parameters next
assert Example.__parameters__[0] == "param"
# Check the other parameters are as expected.
# only run this test if "param" is not already on the cosmology
if cosmo_cls.__parameters__[0] != "param":
assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)
def test_make_from_Parameter(self, cosmo_cls, clean_registry):
"""Test the parameter creation process. Uses ``__set__``."""
class Example(cosmo_cls):
param = Parameter(unit=u.eV, equivalencies=u.mass_energy())
def __init__(self, param, *, name=None, meta=None):
self.param = param
@property
def is_flat(self):
return super().is_flat()
assert Example(1).param == 1 * u.eV
assert Example(1 * u.eV).param == 1 * u.eV
assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())
# ========================================================================
class TestParameter(ParameterTestMixin):
"""
Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
that wouldn't be covered by the per-cosmology tests.
"""
def setup_class(self):
class Example1(Cosmology):
param = Parameter(
doc="Description of example parameter.",
unit=u.m,
equivalencies=u.mass_energy(),
)
def __init__(self, param=15):
self.param = param
@property
def is_flat(self):
return super().is_flat()
# with validator
class Example2(Example1):
def __init__(self, param=15 * u.m):
self.param = param
@Example1.param.validator
def param(self, param, value):
return value.to(u.km)
# attributes
self.classes = {"Example1": Example1, "Example2": Example2}
def teardown_class(self):
for cls in self.classes.values():
_COSMOLOGY_CLASSES.pop(cls.__qualname__)
@pytest.fixture(scope="class", params=["Example1", "Example2"])
def cosmo_cls(self, request):
"""Cosmology class."""
return self.classes[request.param]
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""Cosmology instance"""
return cosmo_cls()
@pytest.fixture(scope="class")
def param(self, cosmo_cls):
"""Get Parameter 'param' from cosmology class."""
return cosmo_cls.param
# ==============================================================
def test_Parameter_instance_attributes(self, param):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
super().test_Parameter_instance_attributes(param)
# property
assert param.__doc__ == "Description of example parameter."
# custom from init
assert param._unit == u.m
assert param._equivalencies == u.mass_energy()
assert param._format_spec == ""
assert param._derived == np.False_
# custom from set_name
assert param._attr_name == "param"
assert param._attr_name_private == "_param"
def test_Parameter_fvalidate(self, cosmo, param):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
super().test_Parameter_fvalidate(param)
value = param.fvalidate(cosmo, param, 1000 * u.m)
assert value == 1 * u.km
def test_Parameter_name(self, param):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
super().test_Parameter_name(param)
assert param.name == "param"
def test_Parameter_unit(self, param):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
super().test_Parameter_unit(param)
assert param.unit == u.m
def test_Parameter_equivalencies(self, param):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
super().test_Parameter_equivalencies(param)
assert param.equivalencies == u.mass_energy()
def test_Parameter_format_spec(self, param):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
super().test_Parameter_format_spec(param)
with pytest.warns(AstropyDeprecationWarning):
assert param.format_spec == ""
def test_Parameter_derived(self, cosmo_cls, param):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
super().test_Parameter_derived(cosmo_cls, param)
assert param.derived is False
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.__get__`."""
super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param)
# from instance
value = getattr(cosmo, param.name)
assert value == 15 * u.m
# -------------------------------------------
# validation
def test_Parameter_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.validator`."""
for k in Parameter._registry_validators:
newparam = param.validator(k)
assert newparam.fvalidate == newparam._registry_validators[k]
# error for non-registered str
with pytest.raises(ValueError, match="`fvalidate`, if str"):
Parameter(fvalidate="NOT REGISTERED")
# error if wrong type
with pytest.raises(TypeError, match="`fvalidate` must be a function or"):
Parameter(fvalidate=object())
def test_Parameter_validate(self, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.validate`."""
value = param.validate(cosmo, 1000 * u.m)
# whether has custom validator
if param.fvalidate is param._registry_validators["default"]:
assert value.unit == u.m
assert value.value == 1000
else:
assert value.unit == u.km
assert value.value == 1
def test_Parameter_register_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.register_validator`."""
# already registered
with pytest.raises(KeyError, match="validator 'default' already"):
param.__class__.register_validator("default", None)
# validator not None
def notnonefunc(x):
return x
try:
validator = param.__class__.register_validator("newvalidator", notnonefunc)
assert validator is notnonefunc
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# used as decorator
try:
@param.__class__.register_validator("newvalidator")
def func(cosmology, param, value):
return value
assert param.__class__._registry_validators["newvalidator"] is func
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# -------------------------------------------
def test_Parameter_clone(self, param):
"""Test :meth:`astropy.cosmology.Parameter.clone`."""
# this implicitly relies on `__eq__` testing properly. Which is tested.
# basic test that nothing changes
assert param.clone() == param
assert param.clone() is not param # but it's not a 'singleton'
# passing kwargs will change stuff
newparam = param.clone(unit="km/(yr sr)")
assert newparam.unit == u.km / u.yr / u.sr
assert param.unit != u.km / u.yr / u.sr # original is unchanged
# expected failure for not-an-argument
with pytest.raises(TypeError):
param.clone(not_a_valid_parameter=True)
# -------------------------------------------
def test_Parameter_equality(self):
"""
Test Parameter equality.
Determined from the processed initialization args (including defaults).
"""
p1 = Parameter(unit="km / (s Mpc)")
p2 = Parameter(unit="km / (s Mpc)")
assert p1 == p2
# not equal parameters
p3 = Parameter(unit="km / s")
assert p3 != p1
# misc
assert p1 != 2 # show doesn't error
# -------------------------------------------
def test_Parameter_repr(self, cosmo_cls, param):
"""Test Parameter repr."""
r = repr(param)
assert "Parameter(" in r
for subs in (
"derived=False",
'unit=Unit("m")',
'equivalencies=[(Unit("kg"), Unit("J")',
"doc='Description of example parameter.'",
):
assert subs in r, subs
# `fvalidate` is a little tricker b/c one of them is custom!
if param.fvalidate in param._registry_validators.values(): # not custom
assert "fvalidate='default'" in r
else:
assert "fvalidate=<" in r # Some function, don't care about details.
def test_Parameter_repr_roundtrip(self, param):
"""Test ``eval(repr(Parameter))`` can round trip to ``Parameter``."""
P = Parameter(doc="A description of this parameter.", derived=True)
NP = eval(repr(P)) # Evaluate string representation back into a param.
assert P == NP
# ==============================================================
def test_Parameter_doesnt_change_with_generic_class(self):
"""Descriptors are initialized once and not updated on subclasses."""
class ExampleBase:
def __init__(self, param=15):
self._param = param
sig = inspect.signature(__init__)
_init_signature = sig.replace(parameters=list(sig.parameters.values())[1:])
param = Parameter(doc="example parameter")
class Example(ExampleBase):
pass
assert Example.param is ExampleBase.param
def test_Parameter_doesnt_change_with_cosmology(self, cosmo_cls):
"""Cosmology reinitializes all descriptors when a subclass is defined."""
# define subclass to show param is same
class Example(cosmo_cls):
pass
assert Example.param is cosmo_cls.param
# unregister
_COSMOLOGY_CLASSES.pop(Example.__qualname__)
assert Example.__qualname__ not in _COSMOLOGY_CLASSES
|
0b154335d1ec1a58d0c9fe5377421b6b7efe980c58d777d4d3ec57d2f03065ae | """Testing :mod:`astropy.cosmology.units`."""
##############################################################################
# IMPORTS
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Planck13, default_cosmology
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
def test_has_expected_units():
"""
Test that this module has the expected set of units. Some of the units are
imported from :mod:`astropy.units`, or vice versa. Here we test presence,
not usage. Units from :mod:`astropy.units` are tested in that module. Units
defined in :mod:`astropy.cosmology` will be tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`littleh`"):
assert u.astrophys.littleh is cu.littleh
def test_has_expected_equivalencies():
"""
Test that this module has the expected set of equivalencies. Many of the
equivalencies are imported from :mod:`astropy.units`, so here we test
presence, not usage. Equivalencies from :mod:`astropy.units` are tested in
that module. Equivalencies defined in :mod:`astropy.cosmology` will be
tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"):
assert u.equivalencies.with_H0 is cu.with_H0
def test_littleh():
"""Test :func:`astropy.cosmology.units.with_H0`."""
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
    assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)

    # make sure using the default cosmology works
    cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
    assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)

    # Now try a luminosity scaling
    h1lum = 0.49 * u.Lsun * cu.littleh**-2
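    # Luminosity scales as h^-2: with h = 0.7, 0.49 / 0.7**2 = 1 Lsun.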
    assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)

    # And the trickiest one: magnitudes. Using H0=10 here for the round numbers
    H0_10 = 10 * u.km / u.s / u.Mpc
    # assume the "true" magnitude M = 12.
    # Then M - 5*log_10(h) = M + 5 = 17
    withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh**2))
    assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag)


@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_dimensionless_redshift():
    """Test :func:`astropy.cosmology.units.dimensionless_redshift`."""
    z = 3 * cu.redshift
    val = 3 * u.one

    # show units not equal
    assert z.unit == cu.redshift
    assert z.unit != u.one
    assert u.get_physical_type(z) == "redshift"

    # test equivalency enabled by default
    assert z == val

    # also test that it works for powers
    assert (3 * cu.redshift**3) == val

    # and in composite units
    assert (3 * u.km / cu.redshift**3) == 3 * u.km

    # test it also works as an equivalency
    with u.set_enabled_equivalencies([]):  # turn off default equivalencies
        assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val

        with pytest.raises(ValueError):
            z.to(u.one)

    # if this fails, something is really wrong
    with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
        assert z == val


@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_temperature():
    """Test :func:`astropy.cosmology.units.redshift_temperature`."""
    cosmo = Planck13.clone(Tcmb0=3 * u.K)
    default_cosmo = default_cosmology.get()
    z = 15 * cu.redshift
    Tcmb = cosmo.Tcmb(z)

    # 1) Default (without specifying the cosmology)
    with default_cosmology.set(cosmo):
        equivalency = cu.redshift_temperature()
        assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
        assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)

    # showing the answer changes if the cosmology changes
    # this test uses the default cosmology
    equivalency = cu.redshift_temperature()
    assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
    assert default_cosmo.Tcmb(z) != Tcmb

    # 2) Specifying the cosmology
    equivalency = cu.redshift_temperature(cosmo)
    assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
    assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)

    # Test `atzkw`
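    # (`ztol` is passed through to `z_at_value`, which numerically inverts
    # Tcmb(z) to recover the redshift.)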
    equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
    assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)


@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_hubble():
    """Test :func:`astropy.cosmology.units.redshift_hubble`."""
    unit = u.km / u.s / u.Mpc
    cosmo = Planck13.clone(H0=100 * unit)
    default_cosmo = default_cosmology.get()
    z = 15 * cu.redshift
    H = cosmo.H(z)
    h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
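    # littleh expresses the Hubble parameter in units of 100 km/s/Mpc.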
    # 1) Default (without specifying the cosmology)
    with default_cosmology.set(cosmo):
        equivalency = cu.redshift_hubble()
        # H
        assert_quantity_allclose(z.to(unit, equivalency), H)
        assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
        # little-h
        assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
        assert_quantity_allclose(h.to(cu.redshift, equivalency), z)

    # showing the answer changes if the cosmology changes
    # this test uses the default cosmology
    equivalency = cu.redshift_hubble()
    assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
    assert default_cosmo.H(z) != H

    # 2) Specifying the cosmology
    equivalency = cu.redshift_hubble(cosmo)
    # H
    assert_quantity_allclose(z.to(unit, equivalency), H)
    assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
    # little-h
    assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
    assert_quantity_allclose(h.to(cu.redshift, equivalency), z)

    # Test `atzkw`
    equivalency = cu.redshift_hubble(cosmo, ztol=1e-10)
    assert_quantity_allclose(H.to(cu.redshift, equivalency), z)  # H
    assert_quantity_allclose(h.to(cu.redshift, equivalency), z)  # little-h


@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
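# The first parametrized case pulls the default ``kind`` from the signature of
# `redshift_distance`, so the default is exercised alongside the explicit kinds.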
@pytest.mark.parametrize(
    "kind",
    [cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"],
)
def test_redshift_distance(kind):
    """Test :func:`astropy.cosmology.units.redshift_distance`."""
    z = 15 * cu.redshift
    d = getattr(Planck13, kind + "_distance")(z)

    equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind)

    # properties of Equivalency
    assert equivalency.name[0] == "redshift_distance"
    assert equivalency.kwargs[0]["cosmology"] == Planck13
    assert equivalency.kwargs[0]["distance"] == kind

    # roundtrip
    assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
    assert_quantity_allclose(d.to(cu.redshift, equivalency), z)


def test_redshift_distance_wrong_kind():
    """Test :func:`astropy.cosmology.units.redshift_distance` wrong kind."""
    with pytest.raises(ValueError, match="`kind`"):
        cu.redshift_distance(kind=None)


@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
class Test_with_redshift:
    """Test `astropy.cosmology.units.with_redshift`."""

    @pytest.fixture(scope="class")
    def cosmo(self):
        """Test cosmology."""
        return Planck13.clone(Tcmb0=3 * u.K)

    # ===========================================

    def test_cosmo_different(self, cosmo):
        """The default is different from the test cosmology."""
        default_cosmo = default_cosmology.get()
        assert default_cosmo != cosmo  # shows changing default

    def test_no_equivalency(self, cosmo):
        """Test the equivalency ``with_redshift`` without any enabled."""
        equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False)
        assert len(equivalency) == 0

    # -------------------------------------------

    def test_temperature_off(self, cosmo):
        """Test ``with_redshift`` with the temperature off."""
        z = 15 * cu.redshift
        err_msg = (
            r"^'redshift' \(redshift\) and 'K' \(temperature\) are not convertible$"
        )

        # 1) Default (without specifying the cosmology)
        with default_cosmology.set(cosmo):
            equivalency = cu.with_redshift(Tcmb=False)
            with pytest.raises(u.UnitConversionError, match=err_msg):
                z.to(u.K, equivalency)

        # 2) Specifying the cosmology
        equivalency = cu.with_redshift(cosmo, Tcmb=False)
        with pytest.raises(u.UnitConversionError, match=err_msg):
            z.to(u.K, equivalency)

    def test_temperature(self, cosmo):
        """Test temperature equivalency component."""
        default_cosmo = default_cosmology.get()
        z = 15 * cu.redshift
        Tcmb = cosmo.Tcmb(z)

        # 1) Default (without specifying the cosmology)
        with default_cosmology.set(cosmo):
            equivalency = cu.with_redshift(Tcmb=True)
            assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
            assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)

        # showing the answer changes if the cosmology changes
        # this test uses the default cosmology
        equivalency = cu.with_redshift(Tcmb=True)
        assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
        assert default_cosmo.Tcmb(z) != Tcmb

        # 2) Specifying the cosmology
        equivalency = cu.with_redshift(cosmo, Tcmb=True)
        assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
        assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)

        # Test `atzkw`
        # this is really just a test that 'atzkw' doesn't fail
        equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10})
        assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)

    # -------------------------------------------

    def test_hubble_off(self, cosmo):
        """Test ``with_redshift`` with Hubble off."""
        unit = u.km / u.s / u.Mpc
        z = 15 * cu.redshift
        err_msg = (
            r"^'redshift' \(redshift\) and 'km / \(Mpc s\)' \(frequency\) are not "
            "convertible$"
        )

        # 1) Default (without specifying the cosmology)
        with default_cosmology.set(cosmo):
            equivalency = cu.with_redshift(hubble=False)
            with pytest.raises(u.UnitConversionError, match=err_msg):
                z.to(unit, equivalency)

        # 2) Specifying the cosmology
        equivalency = cu.with_redshift(cosmo, hubble=False)
        with pytest.raises(u.UnitConversionError, match=err_msg):
            z.to(unit, equivalency)

    def test_hubble(self, cosmo):
        """Test Hubble equivalency component."""
        unit = u.km / u.s / u.Mpc
        default_cosmo = default_cosmology.get()
        z = 15 * cu.redshift
        H = cosmo.H(z)
        h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh

        # 1) Default (without specifying the cosmology)
        with default_cosmology.set(cosmo):
            equivalency = cu.with_redshift(hubble=True)
            # H
            assert_quantity_allclose(z.to(unit, equivalency), H)
            assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
            # little-h
            assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
            assert_quantity_allclose(h.to(cu.redshift, equivalency), z)

        # showing the answer changes if the cosmology changes
        # this test uses the default cosmology
        equivalency = cu.with_redshift(hubble=True)
        assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
        assert default_cosmo.H(z) != H

        # 2) Specifying the cosmology
        equivalency = cu.with_redshift(cosmo, hubble=True)
        # H
        assert_quantity_allclose(z.to(unit, equivalency), H)
        assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
        # little-h
        assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
        assert_quantity_allclose(h.to(cu.redshift, equivalency), z)

        # Test `atzkw`
        # this is really just a test that 'atzkw' doesn't fail
        equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10})
        assert_quantity_allclose(H.to(cu.redshift, equivalency), z)  # H
        assert_quantity_allclose(h.to(cu.redshift, equivalency), z)  # little-h

    # -------------------------------------------

    def test_distance_off(self, cosmo):
        """Test ``with_redshift`` with the distance off."""
        z = 15 * cu.redshift
        err_msg = r"^'redshift' \(redshift\) and 'Mpc' \(length\) are not convertible$"

        # 1) Default (without specifying the cosmology)
        with default_cosmology.set(cosmo):
            equivalency = cu.with_redshift(distance=None)
            with pytest.raises(u.UnitConversionError, match=err_msg):
                z.to(u.Mpc, equivalency)

        # 2) Specifying the cosmology
        equivalency = cu.with_redshift(cosmo, distance=None)
        with pytest.raises(u.UnitConversionError, match=err_msg):
            z.to(u.Mpc, equivalency)

    def test_distance_default(self):
        """Test distance equivalency default."""
        z = 15 * cu.redshift
        d = default_cosmology.get().comoving_distance(z)

        equivalency = cu.with_redshift()
        assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
        assert_quantity_allclose(d.to(cu.redshift, equivalency), z)

    def test_distance_wrong_kind(self):
        """Test distance equivalency, but the wrong kind."""
        with pytest.raises(ValueError, match="`kind`"):
            cu.with_redshift(distance=ValueError)

    @pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"])
    def test_distance(self, kind):
        """Test distance equivalency."""
        cosmo = Planck13
        z = 15 * cu.redshift
        dist = getattr(cosmo, kind + "_distance")(z)

        default_cosmo = default_cosmology.get()
        assert default_cosmo != cosmo  # shows changing default

        # 1) without specifying the cosmology
        with default_cosmology.set(cosmo):
            equivalency = cu.with_redshift(distance=kind)
            assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)

        # showing the answer changes if the cosmology changes
        # this test uses the default cosmology
        equivalency = cu.with_redshift(distance=kind)
        assert_quantity_allclose(
            z.to(u.Mpc, equivalency), getattr(default_cosmo, kind + "_distance")(z)
        )
        assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist)

        # 2) Specifying the cosmology
        equivalency = cu.with_redshift(cosmo, distance=kind)
        assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
        assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)

        # Test atzkw
        # this is really just a test that 'atzkw' doesn't fail
        equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10})
        assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)


def test_equivalency_context_manager():
    base_registry = u.get_current_unit_registry()

    # check starting with only the dimensionless_redshift equivalency.
    assert len(base_registry.equivalencies) == 1
    assert str(base_registry.equivalencies[0][0]) == "redshift"
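

# A minimal usage sketch (illustrative only, not part of the test suite),
# assuming a default cosmology is set:
#
#     import astropy.units as u
#     import astropy.cosmology.units as cu
#
#     z = 1.5 * cu.redshift
#     d = z.to(u.Mpc, cu.with_redshift(distance="comoving"))  # comoving distance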