hash | content
---|---
1814677e830117cc068ebe68ad6d36ee2c0c49e2e6ec8e9dc7d2d4d41968f293 | import abc
from collections import OrderedDict
import pytest
import numpy as np
from astropy.utils.metadata import MetaData, MergeConflictError, merge, enable_merge_strategies
from astropy.utils.metadata import common_dtype
from astropy.utils import metadata
from astropy.io import fits
class OrderedDictSubclass(OrderedDict):
pass
class MetaBaseTest:
__metaclass__ = abc.ABCMeta
def test_none(self):
d = self.test_class(*self.args)
assert isinstance(d.meta, OrderedDict)
assert len(d.meta) == 0
@pytest.mark.parametrize(('meta'), ([dict([('a', 1)]),
OrderedDict([('a', 1)]),
OrderedDictSubclass([('a', 1)])]))
def test_mapping_init(self, meta):
d = self.test_class(*self.args, meta=meta)
assert type(d.meta) == type(meta)
assert d.meta['a'] == 1
@pytest.mark.parametrize(('meta'), (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
def test_non_mapping_init(self, meta):
with pytest.raises(TypeError):
self.test_class(*self.args, meta=meta)
@pytest.mark.parametrize(('meta'), ([dict([('a', 1)]),
OrderedDict([('a', 1)]),
OrderedDictSubclass([('a', 1)])]))
def test_mapping_set(self, meta):
d = self.test_class(*self.args, meta=meta)
assert type(d.meta) == type(meta)
assert d.meta['a'] == 1
@pytest.mark.parametrize(('meta'), (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
def test_non_mapping_set(self, meta):
with pytest.raises(TypeError):
d = self.test_class(*self.args, meta=meta)
def test_meta_fits_header(self):
header = fits.header.Header()
header.set('observer', 'Edwin Hubble')
header.set('exptime', '3600')
d = self.test_class(*self.args, meta=header)
assert d.meta['OBSERVER'] == 'Edwin Hubble'
class ExampleData:
meta = MetaData()
def __init__(self, meta=None):
self.meta = meta
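# MetaData is a descriptor that defaults to an empty OrderedDict and
# preserves the mapping type it is given; a minimal usage sketch
# (values illustrative):
#
#   d = ExampleData(meta={'observer': 'Hubble'})
#   d.meta['observer']   # -> 'Hubble'
#   ExampleData().meta   # -> OrderedDict()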
class TestMetaExampleData(MetaBaseTest):
test_class = ExampleData
args = ()
def test_metadata_merging_conflict_exception():
"""Regression test for issue #3294.
Ensure that an exception is raised when a metadata conflict exists
and ``metadata_conflicts='error'`` has been set.
"""
data1 = ExampleData()
data2 = ExampleData()
data1.meta['somekey'] = {'x': 1, 'y': 1}
data2.meta['somekey'] = {'x': 1, 'y': 999}
with pytest.raises(MergeConflictError):
merge(data1.meta, data2.meta, metadata_conflicts='error')
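# Besides 'error', merge() also accepts metadata_conflicts='warn' (the
# default) and 'silent'; the 'silent' mode is exercised further below.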
def test_metadata_merging():
# Recursive merge
meta1 = {'k1': {'k1': [1, 2],
'k2': 2},
'k2': 2,
'k4': (1, 2)}
meta2 = {'k1': {'k1': [3]},
'k3': 3,
'k4': (3,)}
out = merge(meta1, meta2, metadata_conflicts='error')
assert out == {'k1': {'k2': 2,
'k1': [1, 2, 3]},
'k2': 2,
'k3': 3,
'k4': (1, 2, 3)}
# Merge two ndarrays
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array([3])}
out = merge(meta1, meta2, metadata_conflicts='error')
assert np.all(out['k1'] == np.array([1, 2, 3]))
# Merge list and np.ndarray
meta1 = {'k1': [1, 2]}
meta2 = {'k1': np.array([3])}
out = merge(meta1, meta2, metadata_conflicts='error')
assert np.all(out['k1'] == np.array([1, 2, 3]))
# Can't merge two scalar types
meta1 = {'k1': 1}
meta2 = {'k1': 2}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Conflicting shape
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array([[3]])}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Conflicting array type
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array(['3'])}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Conflicting array type with 'silent' merging
meta1 = {'k1': np.array([1, 2])}
meta2 = {'k1': np.array(['3'])}
out = merge(meta1, meta2, metadata_conflicts='silent')
assert np.all(out['k1'] == np.array(['3']))
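# Summary of the behaviour exercised above: nested dicts merge recursively,
# lists/tuples/ndarrays of compatible dtype concatenate, and conflicting
# scalars or incompatible arrays raise MergeConflictError unless
# metadata_conflicts='silent' keeps the right-hand value.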
def test_metadata_merging_new_strategy():
original_merge_strategies = list(metadata.MERGE_STRATEGIES)
class MergeNumbersAsList(metadata.MergeStrategy):
"""
Scalar float or int values are joined in a list.
"""
types = ((int, float), (int, float))
@classmethod
def merge(cls, left, right):
return [left, right]
class MergeConcatStrings(metadata.MergePlus):
"""
Scalar string values are concatenated
"""
types = (str, str)
enabled = False
# Normally can't merge two scalar types
meta1 = {'k1': 1, 'k2': 'a'}
meta2 = {'k1': 2, 'k2': 'b'}
# Enable new merge strategy
with enable_merge_strategies(MergeNumbersAsList, MergeConcatStrings):
assert MergeNumbersAsList.enabled
assert MergeConcatStrings.enabled
out = merge(meta1, meta2, metadata_conflicts='error')
assert out['k1'] == [1, 2]
assert out['k2'] == 'ab'
assert not MergeNumbersAsList.enabled
assert not MergeConcatStrings.enabled
# Confirm the default enabled=False behavior
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts='error')
# Enable all MergeStrategy subclasses
with enable_merge_strategies(metadata.MergeStrategy):
assert MergeNumbersAsList.enabled
assert MergeConcatStrings.enabled
out = merge(meta1, meta2, metadata_conflicts='error')
assert out['k1'] == [1, 2]
assert out['k2'] == 'ab'
assert not MergeNumbersAsList.enabled
assert not MergeConcatStrings.enabled
metadata.MERGE_STRATEGIES = original_merge_strategies
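# Note: defining a MergeStrategy subclass registers it (initially disabled)
# in metadata.MERGE_STRATEGIES as a side effect, which is why that list is
# saved before the class definitions above and restored here.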
def test_common_dtype_string():
u3 = np.array([u'123'])
u4 = np.array([u'1234'])
b3 = np.array([b'123'])
b5 = np.array([b'12345'])
assert common_dtype([u3, u4]).endswith('U4')
assert common_dtype([b5, u4]).endswith('U5')
assert common_dtype([b3, b5]).endswith('S5')
def test_common_dtype_basic():
i8 = np.array(1, dtype=np.int64)
f8 = np.array(1, dtype=np.float64)
u3 = np.array(u'123')
with pytest.raises(MergeConflictError):
common_dtype([i8, u3])
assert common_dtype([i8, i8]).endswith('i8')
assert common_dtype([i8, f8]).endswith('f8')
|
04120efb53c1bcb0ba1189d59f732ed73e42b30423f106bc8b5f56787108515f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils.data_info import dtype_info_name
STRING_TYPE_NAMES = {(True, 'S'): 'bytes',
(True, 'U'): 'str'}
DTYPE_TESTS = ((np.array(b'abcd').dtype, STRING_TYPE_NAMES[(True, 'S')] + '4'),
(np.array(u'abcd').dtype, STRING_TYPE_NAMES[(True, 'U')] + '4'),
('S4', STRING_TYPE_NAMES[(True, 'S')] + '4'),
('U4', STRING_TYPE_NAMES[(True, 'U')] + '4'),
(np.void, 'void'),
(np.int32, 'int32'),
(bool, 'bool'),
(float, 'float64'),
('<f4', 'float32'),
('u8', 'uint64'),
('c16', 'complex128'),
('object', 'object'))
@pytest.mark.parametrize('input,output', DTYPE_TESTS)
def test_dtype_info_name(input, output):
"""
Test that dtype_info_name is giving the expected output
Here are the available types::
'b' boolean
'i' (signed) integer
'u' unsigned integer
'f' floating-point
'c' complex-floating point
'O' (Python) objects
'S', 'a' (byte-)string
'U' Unicode
'V' raw data (void)
"""
assert dtype_info_name(input) == output
|
04ecb782b09450941abd807de77fd527fa7e11ecc6db91c98fe4a55dcbc06993 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import hashlib
import os
import pathlib
import sys
import tempfile
import urllib.request
import urllib.error
import pytest
from astropy.utils.data import (_get_download_cache_locs, CacheMissingWarning,
get_pkg_data_filename, get_readable_fileobj, conf)
from astropy.tests.helper import raises, catch_warnings
TESTURL = 'http://www.astropy.org'
# General file object function
try:
import bz2 # noqa
except ImportError:
HAS_BZ2 = False
else:
HAS_BZ2 = True
try:
import lzma # noqa
except ImportError:
HAS_XZ = False
else:
HAS_XZ = True
@pytest.mark.remote_data(source='astropy')
def test_download_nocache():
from astropy.utils.data import download_file
fnout = download_file(TESTURL)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source='astropy')
def test_download_parallel():
from astropy.utils.data import download_files_in_parallel
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = 'intersphinx/README'
try:
fnout = download_files_in_parallel([main_url, main_url + fileloc])
except urllib.error.URLError: # Use mirror if timed out
fnout = download_files_in_parallel([mirror_url, mirror_url + fileloc])
assert all([os.path.isfile(f) for f in fnout]), fnout
# NOTE: Does not need remote data.
def test_download_mirror_cache():
import pathlib
import shelve
from astropy.utils.data import _find_pkg_data_path, download_file, get_cached_urls
main_url = pathlib.Path(
_find_pkg_data_path(os.path.join('data', 'dataurl'))).as_uri() + '/'
mirror_url = pathlib.Path(
_find_pkg_data_path(os.path.join('data', 'dataurl_mirror'))).as_uri() + '/' # noqa
main_file = main_url + 'index.html'
mirror_file = mirror_url + 'index.html'
# Temporarily change data.conf.
# This also tests https://github.com/astropy/astropy/pull/8163 because
# urlopen() on a local dir URI also gives URLError.
with conf.set_temp('dataurl', main_url):
with conf.set_temp('dataurl_mirror', mirror_url):
# "Download" files by rerouting URLs to local URIs.
download_file(main_file, cache=True)
download_file(mirror_file, cache=True)
# Now test that download_file looks in mirror's cache before
# download.
# https://github.com/astropy/astropy/issues/6982
dldir, urlmapfn = _get_download_cache_locs()
with shelve.open(urlmapfn) as url2hash:
del url2hash[main_file]
# Comparing hash makes sure they download the same file
# but does not guarantee they were downloaded from the same URL.
assert (download_file(main_file, cache=True) ==
download_file(mirror_file, cache=True))
# This has to be called after the last download to obtain
# an accurate view of cached URLs.
# This is to ensure that main_file was not re-downloaded
# unnecessarily.
# This test also tests for "assert TESTURL in get_cached_urls()".
c_urls = get_cached_urls()
assert (mirror_file in c_urls) and (main_file not in c_urls)
@pytest.mark.remote_data(source='astropy')
def test_download_noprogress():
from astropy.utils.data import download_file
fnout = download_file(TESTURL, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source='astropy')
def test_download_cache():
from astropy.utils.data import download_file, clear_download_cache
download_dir = _get_download_cache_locs()[0]
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
fnout = download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache('http://this_was_never_downloaded_before.com')
# Make sure lockdir was released
lockdir = os.path.join(download_dir, 'lock')
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.remote_data(source='astropy')
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding='utf-8') as page:
assert page.read().find('Astropy') > -1
@pytest.mark.remote_data(source='astropy')
def test_find_by_hash():
from astropy.utils.data import clear_download_cache
with get_readable_fileobj(TESTURL, encoding="binary", cache=True) as page:
hash = hashlib.md5(page.read())
hashstr = 'hash/' + hash.hexdigest()
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(hashstr[5:])
assert not os.path.isfile(fnout)
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
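# A minimal sketch of the 'hash/' convention exercised above: a previously
# cached download can be looked up by its MD5 hex digest (names illustrative):
#
#   digest = hashlib.md5(cached_bytes).hexdigest()
#   path = get_pkg_data_filename('hash/' + digest)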
@pytest.mark.remote_data(source='astropy')
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename('kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli')
# Package data functions
@pytest.mark.parametrize(('filename'), ['local.dat', 'local.dat.gz',
'local.dat.bz2', 'local.dat.xz'])
def test_local_data_obj(filename):
from astropy.utils.data import get_pkg_data_fileobj
if (not HAS_BZ2 and 'bz2' in filename) or (not HAS_XZ and 'xz' in filename):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert ' format files are not supported' in str(e)
else:
with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f:
f.readline()
assert f.read().rstrip() == b'CONTENT'
@pytest.fixture(params=['invalid.dat.bz2', 'invalid.dat.gz'])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b'BZhinvalid'  # starts with the bz2 magic b'BZh'
gz_content = b'\x1f\x8b\x08invalid'  # starts with the gzip magic b'\x1f\x8b\x08'
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith('.bz2'):
contents = bz_content
elif filename.endswith('.gz'):
contents = gz_content
else:
contents = 'invalid'
datafile.write(contents, mode='wb')
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith('.bz2')
is_xz = bad_compressed.endswith('.xz')
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_XZ and is_xz):
with pytest.raises(ValueError) as e:
with get_readable_fileobj(bad_compressed, encoding='binary') as f:
f.read()
assert ' format files are not supported' in str(e)
else:
with get_readable_fileobj(bad_compressed, encoding='binary') as f:
assert f.read().rstrip().endswith(b'invalid')
def test_local_data_name():
fnout = get_pkg_data_filename('data/local.dat')
assert os.path.isfile(fnout) and fnout.endswith('local.dat')
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), 'data')
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert filename == os.path.join(data_dir, 'test_package', 'data',
'foo.txt')
finally:
sys.path.pop(0)
@raises(RuntimeError)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
get_pkg_data_filename('../../../data/README.rst')
def test_compute_hash(tmpdir):
from astropy.utils.data import compute_hash
rands = b'1234567890abcdefghijklmnopqrstuvwxyz'
filename = tmpdir.join('tmp.dat').strpath
with open(filename, 'wb') as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
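# As asserted above, compute_hash returns the MD5 hex digest of the file
# contents, equivalent to hashlib.md5 applied to the whole file at once.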
def test_get_pkg_data_contents():
from astropy.utils.data import get_pkg_data_fileobj, get_pkg_data_contents
with get_pkg_data_fileobj('data/local.dat') as f:
contents1 = f.read()
contents2 = get_pkg_data_contents('data/local.dat')
assert contents1 == contents2
@pytest.mark.remote_data(source='astropy')
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
from astropy.utils import data
from astropy.config import paths
# needed for testing the *real* lock at the end
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo')
monkeypatch.delenv(str('XDG_CONFIG_HOME'))
monkeypatch.setenv(str('XDG_CACHE_HOME'), 'bar')
monkeypatch.delenv(str('XDG_CACHE_HOME'))
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto):
raise OSError
monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir()
# first try with cache
with catch_warnings(CacheMissingWarning) as w:
fnout = data.download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
assert len(w) > 1
w1 = w.pop(0)
w2 = w.pop(0)
assert w1.category == CacheMissingWarning
assert 'Remote data cache could not be accessed' in w1.message.args[0]
assert w2.category == CacheMissingWarning
assert 'File downloaded to temporary location' in w2.message.args[0]
assert fnout == w2.message.args[1]
# clearing the cache should be a no-op that doesn't affect fnout
with catch_warnings(CacheMissingWarning) as w:
data.clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
data._deltemps()
assert not os.path.isfile(fnout)
assert len(w) > 0
w3 = w.pop()
assert w3.category == data.CacheMissingWarning
assert 'Not clearing data cache - cache inacessable' in str(w3.message)
# now try with no cache
with catch_warnings(CacheMissingWarning) as w:
fnnocache = data.download_file(TESTURL, cache=False)
with open(fnnocache, 'rb') as page:
assert page.read().decode('utf-8').find('Astropy') > -1
# no warnings should be raised in fileobj because cache is unnecessary
assert len(w) == 0
# lockdir determined above as the *real* lockdir, not the temp one
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.parametrize(('filename'), [
'unicode.txt',
'unicode.txt.gz',
pytest.param('unicode.txt.bz2', marks=pytest.mark.xfail(not HAS_BZ2, reason='no bz2 support')),
pytest.param('unicode.txt.xz', marks=pytest.mark.xfail(not HAS_XZ, reason='no lzma support'))])
def test_read_unicode(filename):
from astropy.utils.data import get_pkg_data_contents
contents = get_pkg_data_contents(os.path.join('data', filename), encoding='utf-8')
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join('data', filename), encoding='binary')
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:])
def test_compressed_stream():
import base64
gzipped_data = (b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA==")
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b''
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding='binary') as f:
f.readline()
assert f.read().rstrip() == b'CONTENT'
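# The FakeStream case shows that get_readable_fileobj can decompress gzip
# content from an object that only supports read(), with no seek() required.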
@pytest.mark.remote_data(source='astropy')
def test_invalid_location_download():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
from astropy.utils.data import download_file
with pytest.raises(urllib.error.URLError):
download_file('http://www.astropy.org/nonexistentfile')
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
from astropy.utils.data import download_file
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file('http://astropy.org/nonexistentfile')
@pytest.mark.remote_data(source='astropy')
def test_is_url_in_cache():
from astropy.utils.data import download_file, is_url_in_cache
assert not is_url_in_cache('http://astropy.org/nonexistentfile')
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
local_filename = get_pkg_data_filename(os.path.join('data', 'local.dat'))
url = 'file://' + urllib.request.pathname2url(local_filename)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, 'tempdir', str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url):
pass
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(get_pkg_data_filename(os.path.join('data', 'local.dat')))
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == ('This file is used in the test_local_data_* '
'testing functions\nCONTENT')
|
6cf09674c397e73d920e0ba0ed09d0689ea59bd8738a4bef96ba76c26a70faa4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from . import test_progress_bar_func
from astropy.utils import console
from astropy import units as u
class FakeTTY(io.StringIO):
"""IOStream that fakes a TTY; provide an encoding to emulate an output
stream with a specific encoding.
"""
def __new__(cls, encoding=None):
# Return a new subclass of FakeTTY with the requested encoding
if encoding is None:
return super().__new__(cls)
cls = type(encoding.title() + cls.__name__, (cls,),
{'encoding': encoding})
return cls.__new__(cls)
def __init__(self, encoding=None):
super().__init__()
def write(self, s):
if isinstance(s, bytes):
# Just allow this case to work
s = s.decode('latin-1')
elif self.encoding is not None:
s.encode(self.encoding)
return super().write(s)
def isatty(self):
return True
def test_fake_tty():
# First test without a specified encoding; we should be able to write
# arbitrary unicode strings
f1 = FakeTTY()
assert f1.isatty()
f1.write('☃')
assert f1.getvalue() == '☃'
# Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when
# trying to write a string containing non-ASCII characters
f2 = FakeTTY('ascii')
assert f2.isatty()
assert f2.__class__.__name__ == 'AsciiFakeTTY'
assert pytest.raises(UnicodeEncodeError, f2.write, '☃')
assert f2.getvalue() == ''
@pytest.mark.skipif(str("sys.platform.startswith('win')"))
def test_color_text():
assert console._color_text("foo", "green") == '\033[0;32mfoo\033[0m'
def test_color_print():
# This stuff is hard to test, at least smoke test it
console.color_print("foo", "green")
console.color_print("foo", "green", "bar", "red")
def test_color_print2():
# Test that this automatically detects that io.StringIO is
# not a tty
stream = io.StringIO()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == 'foo\n'
stream = io.StringIO()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == 'foobarbaz\n'
@pytest.mark.skipif(str("sys.platform.startswith('win')"))
def test_color_print3():
# Test that this thinks the FakeTTY is a tty and applies colors.
stream = FakeTTY()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\n'
stream = FakeTTY()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\x1b[0;31mbar\x1b[0mbaz\n'
def test_color_print_unicode():
console.color_print("überbær", "red")
def test_color_print_invalid_color():
console.color_print("foo", "unknown")
def test_spinner_non_unicode_console():
"""Regression test for #1760
Ensures that the spinner can fall back gracefully when using the
unicode spinner on a terminal whose default encoding cannot encode the
unicode characters.
"""
stream = FakeTTY('ascii')
chars = console.Spinner._default_unicode_chars
with console.Spinner("Reticulating splines", file=stream,
chars=chars) as s:
next(s)
def test_progress_bar():
# This stuff is hard to test, at least smoke test it
with console.ProgressBar(50) as bar:
for i in range(50):
bar.update()
def test_progress_bar2():
for x in console.ProgressBar(range(50)):
pass
def test_progress_bar3():
def do_nothing(*args, **kwargs):
pass
console.ProgressBar.map(do_nothing, range(50))
def test_zero_progress_bar():
with console.ProgressBar(0) as bar:
pass
def test_progress_bar_as_generator():
sum = 0
for x in console.ProgressBar(range(50)):
sum += x
assert sum == 1225
sum = 0
for x in console.ProgressBar(50):
sum += x
assert sum == 1225
def test_progress_bar_map():
items = list(range(100))
result = console.ProgressBar.map(test_progress_bar_func.func,
items, step=10, multiprocess=True)
assert items == result
result1 = console.ProgressBar.map(test_progress_bar_func.func,
items, step=10, multiprocess=2)
assert items == result1
@pytest.mark.parametrize(("seconds", "string"),
[(864088, " 1w 3d"),
(187213, " 2d 4h"),
(3905, " 1h 5m"),
(64, " 1m 4s"),
(15, " 15s"),
(2, " 2s")]
)
def test_human_time(seconds, string):
human_time = console.human_time(seconds)
assert human_time == string
@pytest.mark.parametrize(("size", "string"),
[(8640882, "8.6M"),
(187213, "187k"),
(3905, "3.9k"),
(64, " 64 "),
(2, " 2 "),
(10*u.GB, " 10G")]
)
def test_human_file_size(size, string):
human_time = console.human_file_size(size)
assert human_time == string
@pytest.mark.parametrize("size", (50*u.km, 100*u.g))
def test_bad_human_file_size(size):
assert pytest.raises(u.UnitConversionError, console.human_file_size, size)
|
08aa4d38f0b110dd0a7591dc4a7d67910782cab2177628351b3b1b6dfb7e473b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# namedtuple is needed for find_mod_objs so it can have a non-local module
from collections import namedtuple
import pytest
from astropy.utils import introspection
from astropy.utils.introspection import (find_current_module, find_mod_objs,
isinstancemethod, minversion)
def test_pkg_finder():
"""
Tests that the `find_current_module` function works. Note that
this also implicitly tests compat.misc._patched_getmodule
"""
mod1 = 'astropy.utils.introspection'
mod2 = 'astropy.utils.tests.test_introspection'
mod3 = 'astropy.utils.tests.test_introspection'
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
def test_find_current_mod():
from sys import getrecursionlimit
thismodnm = __name__
assert find_current_module(0) is introspection
assert find_current_module(1).__name__ == thismodnm
assert find_current_module(getrecursionlimit() + 1) is None
assert find_current_module(0, True).__name__ == thismodnm
assert find_current_module(0, [introspection]).__name__ == thismodnm
assert find_current_module(0, ['astropy.utils.introspection']).__name__ == thismodnm
with pytest.raises(ImportError):
find_current_module(0, ['faddfdsasewrweriopunjlfiurrhujnkflgwhu'])
def test_find_mod_objs():
lnms, fqns, objs = find_mod_objs('astropy')
# this import is after the above call intentionally to make sure
# find_mod_objs properly imports astropy on its own
import astropy
# just check for astropy.test ... other things might be added, so we
# shouldn't check that it's the only thing
assert 'test' in lnms
assert astropy.test in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=False)
assert 'namedtuple' in lnms
assert 'collections.namedtuple' in fqns
assert namedtuple in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=True)
assert 'namedtuple' not in lnms
assert 'collections.namedtuple' not in fqns
assert namedtuple not in objs
def test_minversion():
from types import ModuleType
test_module = ModuleType(str("test_module"))
test_module.__version__ = '0.12.2'
good_versions = ['0.12', '0.12.1', '0.12.0.dev', '0.12dev']
bad_versions = ['1', '1.2rc1']
for version in good_versions:
assert minversion(test_module, version)
for version in bad_versions:
assert not minversion(test_module, version)
|
c170ea5afa229bb3630e06d5a630ab03acce696b4d4460720182cef11bee9128 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.tests.helper import raises
from astropy.utils import collections
@raises(TypeError)
def test_homogeneous_list():
l = collections.HomogeneousList(int)
l.append(5.0)
@raises(TypeError)
def test_homogeneous_list2():
l = collections.HomogeneousList(int)
l.extend([5.0])
def test_homogeneous_list3():
l = collections.HomogeneousList(int)
l.append(5)
assert l == [5]
def test_homogeneous_list4():
l = collections.HomogeneousList(int)
l.extend([5])
assert l == [5]
@raises(TypeError)
def test_homogeneous_list5():
l = collections.HomogeneousList(int, [1, 2, 3])
l[1] = 5.0
def test_homogeneous_list_setitem_works():
l = collections.HomogeneousList(int, [1, 2, 3])
l[1] = 5
assert l == [1, 5, 3]
def test_homogeneous_list_setitem_works_with_slice():
l = collections.HomogeneousList(int, [1, 2, 3])
l[0:1] = [10, 20, 30]
assert l == [10, 20, 30, 2, 3]
l[:] = [5, 4, 3]
assert l == [5, 4, 3]
l[::2] = [2, 1]
assert l == [2, 4, 1]
def test_homogeneous_list_init_got_invalid_type():
with pytest.raises(TypeError):
collections.HomogeneousList(int, [1, 2., 3])
def test_homogeneous_list_works_with_generators():
hl = collections.HomogeneousList(int, (i for i in range(3)))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl.extend(i for i in range(3))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl[0:1] = (i for i in range(3))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl += (i for i in range(3))
assert hl == [0, 1, 2]
|
c2b2d77c3101ba962b7edc7b7b8e2434051e5872f35a5f9ae6e559d0cbd27ece | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import inspect
import pickle
import pytest
from astropy.utils.decorators import (deprecated_attribute, deprecated, wraps,
sharedmethod, classproperty,
format_doc, deprecated_renamed_argument)
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.tests.helper import catch_warnings
class NewDeprecationWarning(AstropyDeprecationWarning):
"""
New Warning subclass to be used to test the deprecated decorator's
``warning_type`` parameter.
"""
def test_wraps():
"""
Tests the compatibility replacement for functools.wraps which supports
argument preservation across all supported Python versions.
"""
def foo(a, b, c=1, d=2, e=3, **kwargs):
"""A test function."""
return a, b, c, d, e, kwargs
@wraps(foo)
def bar(*args, **kwargs):
return ('test',) + foo(*args, **kwargs)
expected = ('test', 1, 2, 3, 4, 5, {'f': 6, 'g': 7})
assert bar(1, 2, 3, 4, 5, f=6, g=7) == expected
assert bar.__name__ == 'foo'
if foo.__doc__ is not None:
# foo.__doc__ may be None when running under python -OO
assert bar.__doc__ == "A test function."
if hasattr(foo, '__qualname__'):
assert bar.__qualname__ == foo.__qualname__
sig = inspect.signature(bar)
assert list(sig.parameters) == ['a', 'b', 'c', 'd', 'e', 'kwargs']
defaults = [inspect._empty, inspect._empty, 1, 2, 3, inspect._empty]
assert [p.default for p in sig.parameters.values()] == defaults
def test_deprecated_attribute():
class DummyClass:
def __init__(self):
self._foo = 42
self._bar = 4242
def set_private(self):
self._foo = 100
self._bar = 1000
foo = deprecated_attribute('foo', '0.2')
bar = deprecated_attribute('bar', '0.2',
warning_type=NewDeprecationWarning)
dummy = DummyClass()
with catch_warnings(AstropyDeprecationWarning) as wfoo:
dummy.foo
with catch_warnings(AstropyDeprecationWarning) as wbar:
dummy.bar
assert len(wfoo) == 1
assert str(wfoo[0].message) == ("The foo attribute is deprecated and may "
"be removed in a future version.")
assert wfoo[0].category == AstropyDeprecationWarning
assert len(wbar) == 1
assert str(wbar[0].message) == ("The bar attribute is deprecated and may "
"be removed in a future version.")
assert wbar[0].category == NewDeprecationWarning
with catch_warnings() as w:
dummy.set_private()
assert len(w) == 0
# This needs to be defined outside of the test function, because we
# want to try to pickle it.
@deprecated('100.0')
class TA:
"""
This is the class docstring.
"""
def __init__(self):
"""
This is the __init__ docstring
"""
pass
class TMeta(type):
metaclass_attr = 1
@deprecated('100.0')
class TB(metaclass=TMeta):
pass
@deprecated('100.0', warning_type=NewDeprecationWarning)
class TC:
"""
This class has the custom warning.
"""
pass
def test_deprecated_class():
orig_A = TA.__bases__[0]
# The only thing that should be different about the new class
# is __doc__, __init__, __bases__ and __subclasshook__.
# and __init_subclass__ for Python 3.6+.
for x in dir(orig_A):
if x not in ('__doc__', '__init__', '__bases__', '__dict__',
'__subclasshook__', '__init_subclass__'):
assert getattr(TA, x) == getattr(orig_A, x)
with catch_warnings(AstropyDeprecationWarning) as w:
TA()
assert len(w) == 1
if TA.__doc__ is not None:
assert 'function' not in TA.__doc__
assert 'deprecated' in TA.__doc__
assert 'function' not in TA.__init__.__doc__
assert 'deprecated' in TA.__init__.__doc__
# Make sure the object is picklable
pickle.dumps(TA)
with catch_warnings(NewDeprecationWarning) as w:
TC()
assert len(w) == 1
assert w[0].category == NewDeprecationWarning
def test_deprecated_class_with_new_method():
"""
Test that a class with __new__ method still works even if it accepts
additional arguments.
This previously failed because the deprecated decorator would wrap
``object.__init__``, which takes no arguments.
"""
@deprecated('1.0')
class A:
def __new__(cls, a):
return super().__new__(cls)
# Creating an instance should work but raise a DeprecationWarning
with catch_warnings(AstropyDeprecationWarning) as w:
A(1)
assert len(w) == 1
@deprecated('1.0')
class B:
def __new__(cls, a):
return super().__new__(cls)
def __init__(self, a):
pass
# Creating an instance should work but raise a DeprecationWarning
with catch_warnings(AstropyDeprecationWarning) as w:
B(1)
assert len(w) == 1
def test_deprecated_class_with_super():
"""
Regression test for an issue where classes that used ``super()`` in their
``__init__`` did not actually call the correct class's ``__init__`` in the
MRO.
"""
@deprecated('100.0')
class TB:
def __init__(self, a, b):
super().__init__()
with catch_warnings(AstropyDeprecationWarning) as w:
TB(1, 2)
assert len(w) == 1
if TB.__doc__ is not None:
assert 'function' not in TB.__doc__
assert 'deprecated' in TB.__doc__
assert 'function' not in TB.__init__.__doc__
assert 'deprecated' in TB.__init__.__doc__
def test_deprecated_class_with_custom_metaclass():
"""
Regression test for an issue where deprecating a class with a metaclass
other than type did not restore the metaclass properly.
"""
with catch_warnings(AstropyDeprecationWarning) as w:
TB()
assert len(w) == 1
assert type(TB) is TMeta
assert TB.metaclass_attr == 1
def test_deprecated_static_and_classmethod():
"""
Regression test for issue introduced by
https://github.com/astropy/astropy/pull/2811 and mentioned also here:
https://github.com/astropy/astropy/pull/2580#issuecomment-51049969
where it appears that deprecated staticmethods didn't work on Python 2.6.
"""
class A:
"""Docstring"""
@deprecated('1.0')
@staticmethod
def B():
pass
@deprecated('1.0')
@classmethod
def C(cls):
pass
with catch_warnings(AstropyDeprecationWarning) as w:
A.B()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.B.__doc__
with catch_warnings(AstropyDeprecationWarning) as w:
A.C()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.C.__doc__
def test_deprecated_argument():
# Tests the decorator with function, method, staticmethod and classmethod.
class Test:
@classmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(cls, overwrite):
return overwrite
@staticmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test2(overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test3(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
warning_type=NewDeprecationWarning)
def test4(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=False)
def test1(overwrite):
return overwrite
for method in [Test().test1, Test().test2, Test().test3, Test().test4, test1]:
# As positional argument only
assert method(1) == 1
# As new keyword argument
assert method(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyDeprecationWarning) as w:
assert method(clobber=1) == 1
assert len(w) == 1
assert '1.3' in str(w[0].message)
assert 'test_decorators.py' in str(w[0].filename)
if method.__name__ == 'test4':
assert w[0].category == NewDeprecationWarning
# Using both. Both keyword
with pytest.raises(TypeError):
method(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError):
method(1, clobber=2)
def test_deprecated_argument_in_kwargs():
# To rename an argument that is consumed by "kwargs" the "arg_in_kwargs"
# parameter is used.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
arg_in_kwargs=True)
def test(**kwargs):
return kwargs['overwrite']
# As positional argument only
with pytest.raises(TypeError):
test(1)
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert '1.3' in str(w[0].message)
assert 'test_decorators.py' in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError):
test(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError):
test(1, clobber=2)
def test_deprecated_argument_relaxed():
# Relax turns the TypeError if both old and new keyword are used into
# a warning.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert '1.3' in str(w[0].message)
# Using both. Both keyword
with catch_warnings(AstropyUserWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 1
# One positional, one keyword
with catch_warnings(AstropyUserWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 1
def test_deprecated_argument_pending():
# With pending=True, neither a deprecation warning nor a TypeError is
# emitted while both the old and the new keyword keep working.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', pending=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w:
assert test(clobber=1) == 1
assert len(w) == 0
# Using both. Both keyword
with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 0
# One positional, one keyword
with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 0
def test_deprecated_argument_multi_deprecation():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=True)
def test(a, b, c):
return a, b, c
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(x=1, y=2, z=3) == (1, 2, 3)
assert len(w) == 3
# Make sure relax is valid for all arguments
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
assert len(w) == 1
def test_deprecated_argument_multi_deprecation_2():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=[True, True, False])
def test(a, b, c):
return a, b, c
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 1
with pytest.raises(TypeError):
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
def test_deprecated_argument_not_allowed_use():
# If the argument is supposed to be inside the kwargs one needs to set the
# arg_in_kwargs parameter. Without it it raises a TypeError.
with pytest.raises(TypeError):
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(**kwargs):
return kwargs['overwrite']
# Cannot replace "*args".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'args', '1.3')
def test2(*args):
return args
# Cannot replace "**kwargs".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'kwargs', '1.3')
def test3(**kwargs):
return kwargs
def test_deprecated_argument_remove():
@deprecated_renamed_argument('x', None, '2.0', alternative='astropy.y')
def test(dummy=11):
return dummy
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(x=1) == 11
assert len(w) == 1
assert 'Use astropy.y instead' in str(w[0].message)
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(x=1, dummy=10) == 10
assert len(w) == 1
assert test() == 11
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
def test_classproperty_docstring():
"""
Tests that the docstring is set correctly on classproperties.
This failed previously due to a bug in Python that didn't always
set __doc__ properly on instances of property subclasses.
"""
class A:
# Inherits docstring from getter
@classproperty
def foo(cls):
"""The foo."""
return 1
assert A.__dict__['foo'].__doc__ == "The foo."
class B:
# Use doc passed to classproperty constructor
def _get_foo(cls): return 1
foo = classproperty(_get_foo, doc="The foo.")
assert B.__dict__['foo'].__doc__ == "The foo."
def test_format_doc_stringInput_simple():
# Simple tests with string input
docstring_fail = ''
# Raises a ValueError if the input is empty
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
docstring = 'test'
# A first test that replaces an empty docstring
@format_doc(docstring)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == docstring
# Test that it replaces an existing docstring
@format_doc(docstring)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == docstring
def test_format_doc_stringInput_format():
# Tests with string input and formatting
docstring = 'yes {0} no {opt}'
# Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc1():
pass
# Test that the formatting is done right
@format_doc(docstring, '/', opt='= life')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'yes / no = life'
# Test that we can include the original docstring
docstring2 = 'yes {0} no {__doc__}'
@format_doc(docstring2, '/')
def testfunc3():
'''= 2 / 2 * life'''
pass
assert inspect.getdoc(testfunc3) == 'yes / no = 2 / 2 * life'
def test_format_doc_objectInput_simple():
# Simple tests with object input
def docstring_fail():
pass
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
def docstring0():
'''test'''
pass
# A first test that replaces an empty docstring
@format_doc(docstring0)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0)
# Test that it replaces an existing docstring
@format_doc(docstring0)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0)
def test_format_doc_objectInput_format():
# Tests with object input and formatting
def docstring():
'''test {0} test {opt}'''
pass
# Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc_fail():
pass
# Test that the formatting is done right
@format_doc(docstring, '+', opt='= 2 * test')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'test + test = 2 * test'
# Test that we can include the original docstring
def docstring2():
'''test {0} test {__doc__}'''
pass
@format_doc(docstring2, '+')
def testfunc3():
'''= 4 / 2 * test'''
pass
assert inspect.getdoc(testfunc3) == 'test + test = 4 / 2 * test'
def test_format_doc_selfInput_simple():
# Simple tests with self input
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(None)
def testfunc_fail():
pass
# Test that it keeps an existing docstring
@format_doc(None)
def testfunc_1():
'''not test'''
pass
assert inspect.getdoc(testfunc_1) == 'not test'
def test_format_doc_selfInput_format():
# Tests with string input which is '__doc__' (special case) and formatting
# Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(None)
def testfunc_fail():
'''dum {0} dum {opt}'''
pass
# Test that the formatting is done right
@format_doc(None, 'di', opt='da dum')
def testfunc1():
'''dum {0} dum {opt}'''
pass
assert inspect.getdoc(testfunc1) == 'dum di dum da dum'
# Test that we cannot recursively insert the original documentation
@format_doc(None, 'di')
def testfunc2():
'''dum {0} dum {__doc__}'''
pass
assert inspect.getdoc(testfunc2) == 'dum di dum '
def test_format_doc_onMethod():
# Check that the decorator works on methods too; to spice it up we try a
# double decorator
docstring = 'what we do {__doc__}'
class TestClass:
@format_doc(docstring)
@format_doc(None, 'strange.')
def test_method(self):
'''is {0}'''
pass
assert inspect.getdoc(TestClass.test_method) == 'what we do is strange.'
def test_format_doc_onClass():
# Check if the decorator works on classes too
docstring = 'what we do {__doc__} {0}{opt}'
@format_doc(docstring, 'strange', opt='.')
class TestClass:
'''is'''
pass
assert inspect.getdoc(TestClass) == 'what we do is strange.'
|
696dc61fcd281b11994c7360394bba4c5becb26ced300fff2b6e83e87c9d4064 | import time
import numpy as np
from astropy.utils.misc import NumpyRNGContext
def func(i):
"""An identity function that jitters its execution time by a
pseudo-random amount.
FIXME: This function should be defined in test_console.py, but Astropy's
`python setup.py test` interacts strangely with Python's `multiprocessing`
module. I was getting a mysterious PicklingError until I moved this
function into a separate module. (It worked fine in a standalone pytest
script.)"""
with NumpyRNGContext(i):
time.sleep(np.random.uniform(0, 0.01))
return i
|
f194d120cfdc29d3ddd0c16f515dacea6ad2e3b01bab028f58c7e3125181e7a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from astropy.utils import minversion
__all__ = ['NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2',
'NUMPY_LT_1_16']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
NUMPY_LT_1_16 = not minversion('numpy', '1.16')
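# Illustrative guarded use of these flags elsewhere in the code base
# (sketch only):
#
#   if NUMPY_LT_1_14:
#       ...  # apply the pre-1.14 workaround
#   else:
#       ...  # rely on the fixed upstream behaviour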
|
d0ca735d6f5ef1d67fb39cf4a175f9de22f3d749a368583037b55ed26be8b60b | from inspect import signature, Parameter, Signature, BoundArguments
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn("astropy.utils.compat.funcsigs is now deprecated - "
"use inspect instead", AstropyDeprecationWarning)
|
4ad1830cd9521bd35dc2340c1eb1dc916ec2a2c1afa0a24bc5a986f8158c5a6d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
"""
import sys
import functools
from contextlib import suppress
__all__ = ['override__dir__', 'suppress',
'possible_filename', 'namedtuple_asdict']
def possible_filename(filename):
"""
Determine if the ``filename`` argument is an allowable type for a filename.
In Python 3.3 use of non-unicode filenames on system calls such as
`os.stat` and others that accept a filename argument was deprecated (and
may be removed outright in the future).
Therefore this returns `True` in all cases except for `bytes` strings in
Windows.
"""
if isinstance(filename, str):
return True
elif isinstance(filename, bytes):
return not (sys.platform == 'win32')
return False
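# Behaviour sketch implied by the docstring above:
#
#   possible_filename('spam.fits')    # True on any platform
#   possible_filename(b'spam.fits')   # True except on Windows ('win32')
#   possible_filename(3.14)           # False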
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to
include the "standard" members on the object as well. This
decorator takes care of that automatically, and all the wrapped
function needs to do is return a list of the "special" members
that wouldn't be found by the normal Python means.
Example
-------
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
"""
# http://bugs.python.org/issue12166
@functools.wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
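# A self-contained sketch of the pattern from the docstring (class name
# illustrative):
#
#   class Catalog:
#       @override__dir__
#       def __dir__(self):
#           return ['special_method1', 'special_method2']
#
#   # dir(Catalog()) then contains the special members merged with the
#   # standard ones, deduplicated and sorted.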
def namedtuple_asdict(namedtuple):
"""
The same as ``namedtuple._asdict()``.
Parameters
----------
namedtuple : collections.namedtuple
The named tuple to get the dict of
"""
return namedtuple._asdict()
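# Usage sketch (tuple type illustrative):
#
#   Point = namedtuple('Point', ['x', 'y'])   # from collections
#   namedtuple_asdict(Point(1, 2))  # -> OrderedDict([('x', 1), ('y', 2)])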
|
b8202833d6dc7266289c26028e925f26c8edb47182e7cfd4bb7e080e9f1cb9e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import urllib.request
import warnings
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose, catch_warnings
from astropy.utils.iers import iers
from astropy import units as u
from astropy.table import QTable
from astropy.time import Time, TimeDelta
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'data', 'iers_a_excerpt')
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
def test_simple(self):
iers.IERS.close()
assert iers.IERS.iers_table is None
iers_tab = iers.IERS.open()
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS.close()
iers.IERS.open(iers.IERS_B_FILE)
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
iers.IERS.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open("file:" + urllib.request.pathname2url(IERS_A_EXCERPT))
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
# Test the IERS A reader. It is also a regression test that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert (iers_tab['dX_2000A'].unit / u.marcsec).is_unity()
assert (iers_tab['dY_2000A'].unit / u.marcsec).is_unity()
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif(str('not HAS_IERS_A'))
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = os.path.join(os.path.dirname(__file__), 'data', 'finals2000A-2016-02-30-test')
self.iers_a_file_2 = os.path.join(os.path.dirname(__file__), 'data', 'finals2000A-2016-04-30-test')
self.iers_a_url_1 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_1))
self.iers_a_url_2 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_2))
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('iers_auto_url_mirror', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter('ignore', iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
@pytest.mark.remote_data
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
with catch_warnings(iers.IERSStaleWarning) as warns:
with pytest.raises(ValueError) as err:
dat.ut1_utc(Time(60000, format='mjd').jd)
assert 'interpolating from IERS_Auto using predictive values' in str(err)
assert len(warns) == 1
assert 'IERS_Auto predictive values are older' in str(warns[0].message)
# Warning only if we are getting return status
with catch_warnings(iers.IERSStaleWarning) as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
assert 'IERS_Auto predictive values are older' in str(warns[0].message)
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
with catch_warnings(iers.IERSStaleWarning) as warns:
dat.ut1_utc(Time(60000, format='mjd').jd)
assert not warns
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
|
86c588a7265e2b6ba9ddd4a411e08692335ad0ed383913f637067fb9ffdc2184 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.utils.xml.iterparser import _fast_iterparse
# SYSTEM
import io
import zlib
# The C-based XML parser for VOTables previously used fixed-sized
# buffers (allocated at __init__() time). This test will
# only pass with the patch that allows a dynamic realloc() of
# the queue. This addresses the bugs:
#
# - "RuntimeError: XML queue overflow"
# https://github.com/astropy/astropy/issues/5824
# (Kudos to Stefan Becker---ARI/ZAH Heidelberg)
#
# - "iterparse.c: add queue_realloc() + move 'buffersize / 2' logic there"
# https://github.com/astropy/astropy/issues/5869
#
# This test code can emulate a combination of network buffering and
# gzip decompression---with different request sizes, it can be used to
# demonstrate both under-reading and over-reading.
#
# Using the 512-tag VOTABLE XML sample input, and various combinations
# of minimum/maximum fetch sizes, the following situations can be
# generated:
#
# maximum_fetch = 1   (ValueError, no element found) still within gzip headers
# maximum_fetch = 80  (ValueError, unclosed token) short read
# maximum_fetch = 217 passes, because decompressed_length > requested
#                     && < 512 tags in a single parse
# maximum_fetch = 218 (RuntimeError, XML queue overflow)
#
# The test provided here covers the over-reading identified in #5824
# (equivalent to the maximum_fetch = 217 case above).
# Firstly, assemble a minimal VOTABLE header, table contents and footer.
# This is done in textual form, as the aim is to only test the parser, not
# the outputter!
HEADER = """<?xml version="1.0" encoding="UTF-8"?>
<VOTABLE>
<RESOURCE type="results">
<TABLE>
<FIELD ID="foo" name="foo" datatype="int" arraysize="1"/>
<DATA>
<TABLEDATA>
"""
ROW = """<TR><TD>0</TD></TR>
"""
FOOTER = """
</TABLEDATA>
</DATA>
</TABLE>
</RESOURCE>
</VOTABLE>
"""
# minimum passable buffer size => 1024
# 1024 / 2 => 512 tags for overflow
# 512 - 7 tags in header, - 5 tags in footer = 500 tags required for overflow
# 500 / 4 tags (<tr><td></td></tr>) per row == 125 rows required for overflow
VOTABLE_XML = HEADER + 125*ROW + FOOTER
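# A quick, hedged sanity check of the arithmetic above: counting '<' in the
# sample gives 7 (header) + 125 * 4 (rows) + 5 (footer) = 512 openings, i.e.
#   assert VOTABLE_XML.count('<') == 512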
# UngzipFileWrapper() wraps an existing file-like object,
# decompressing the content and returning the plaintext.
# This therefore emulates the behavior of the Python 'requests'
# library when transparently decompressing Gzip HTTP responses.
#
# The critical behavior is that---because of the
# decompression---read() can return considerably more
# bytes than were requested! (But, read() can also return less).
#
# inspiration:
# http://stackoverflow.com/questions/4013843/how-to-wrap-file-object-read-and-write-operation-which-are-readonly
class UngzipFileWrapper:
def __init__(self, fd, **kwargs):
self._file = fd
self._z = zlib.decompressobj(16 + zlib.MAX_WBITS)
def read(self, requested_length):
# emulate network buffering dynamics by clamping the read size
clamped_length = max(1, min(1 << 24, requested_length))
compressed = self._file.read(clamped_length)
plaintext = self._z.decompress(compressed)
        # Close once the underlying file is exhausted (only needed for real
        # local files, i.e. for this testcase).
if len(compressed) == 0:
self.close()
return plaintext
def __getattr__(self, attr):
return getattr(self._file, attr)
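# Usage sketch (hedged; `compressed_bytes` is a hypothetical gzip stream):
#   fd = io.BytesIO(compressed_bytes)
#   wrapper = UngzipFileWrapper(fd)
#   chunk = wrapper.read(64)   # may return far more than 64 plaintext bytes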
# test_iterparser_over_read_simple() is a heavily cut-down version of the
# original, more flexible test case, but without external dependencies.
# The plaintext is compressed and then decompressed to provide a better
# emulation of the original situation where the bug was observed.
#
# If a dependency upon 'zlib' is not desired, it would be possible to
# simplify this testcase by replacing the compress/decompress with a
# read() method emulation that always returned more from a buffer than
# was requested.
def test_iterparser_over_read_simple():
    # Take the plaintext of 512 tags, and compress it with a
    # Gzip-style header (+16), to most closely emulate the behavior
    # of most HTTP servers.
zlib_GZIP_STYLE_HEADER = 16
compo = zlib.compressobj(zlib.Z_BEST_COMPRESSION,
zlib.DEFLATED,
zlib.MAX_WBITS + zlib_GZIP_STYLE_HEADER)
# Bytes vs. String .encode()/.decode() for compatibility with Python 3.5.
s = compo.compress(VOTABLE_XML.encode())
s = s + compo.flush()
fd = io.BytesIO(s)
fd.seek(0)
# Finally setup the test of the C-based '_fast_iterparse()' iterator
# and a situation in which it can be called a-la the VOTable Parser.
MINIMUM_REQUESTABLE_BUFFER_SIZE = 1024
uncompressed_fd = UngzipFileWrapper(fd)
iterable = _fast_iterparse(uncompressed_fd.read,
MINIMUM_REQUESTABLE_BUFFER_SIZE)
list(iterable)
|
d4d4a4ede0b94ff92011f1aadfc27057e1a1fc7b241279c16ddb98a3e7e50d9f | from concurrent.futures import *
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn("astropy.utils.compat.futures is now deprecated - "
"use concurrent.futures instead", AstropyDeprecationWarning)
|
d949da1df57bf8d121208c24be7edc53aa55c55a92fd0a717364f266616c81c9 | # coding: utf-8
# Licensed like numpy; see licenses/NUMPY_LICENSE.rst
import warnings
import numpy as np
from numpy import matmul as np_matmul
from astropy.utils.exceptions import AstropyDeprecationWarning
__all__ = ['matmul', 'GE1P10']
def GE1P10(module=np):
return hasattr(module, 'matmul')
def matmul(*args, **kwargs):
warnings.warn(
'This function is deprecated, as it is available in all NumPy versions '
'that this version of Astropy supports. You should use '
'numpy.matmul directly.', AstropyDeprecationWarning)
return np_matmul(*args, **kwargs)
|
1d0843c7ca67710cafebe022ff93959f9986130fea6f46453e0c898198768ceb | # coding: utf-8
# Licensed like the corresponding numpy file; see licenses/NUMPY_LICENSE.rst
"""
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
import warnings
import numpy as np
from numpy.lib.stride_tricks import (
broadcast_arrays as np_broadcast_arrays,
broadcast_to as np_broadcast_to)
from astropy.utils.exceptions import AstropyDeprecationWarning
__all__ = ['broadcast_arrays', 'broadcast_to', 'GE1P10']
__doctest_skip__ = ['*']
def GE1P10(module=np):
return hasattr(module, 'broadcast_to')
def broadcast_arrays(*args, **kwargs):
warnings.warn(
'This function is deprecated, as it is available in all NumPy versions '
'that this version of Astropy supports. You should use '
'numpy.broadcast_arrays directly.', AstropyDeprecationWarning)
return np_broadcast_arrays(*args, **kwargs)
def broadcast_to(*args, **kwargs):
warnings.warn(
'This function is deprecated, as it is available in all NumPy versions '
'that this version of Astropy supports. You should use '
'numpy.broadcast_to directly.', AstropyDeprecationWarning)
return np_broadcast_to(*args, **kwargs)
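# Example (hedged): the shims above simply forward to numpy while warning,
# e.g. broadcast_to(np.arange(3), (2, 3)) behaves exactly like
# np.broadcast_to(np.arange(3), (2, 3)) but also emits an
# AstropyDeprecationWarning.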
|
5dfa4cc90d99be96ad0bc951441dc7889623a211f7e48d2b463d73ce2a5cf8dc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from distutils.version import LooseVersion
from astropy.visualization.mpl_normalize import simple_norm
from astropy import log
from astropy.io.fits import getdata
def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear',
power=1.0, asinh_a=0.1, min_cut=None, max_cut=None,
min_percent=None, max_percent=None, percent=None,
cmap='Greys_r'):
"""
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.
Parameters
----------
filename : str
The filename of the FITS file.
    ext : int or str
        FITS extension name or number of the image to convert. The
        default is 0.
out_fn : str
The filename of the output bitmap image. The type of bitmap
is determined by the filename extension (e.g. '.jpg', '.png').
The default is a PNG file with the same name as the FITS file.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
The stretching function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
    min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
        The default is the image minimum. ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
        image. The default is the image maximum. ``max_cut`` overrides
        ``max_percent``.
    min_percent : float, optional
        The percentile value used to determine the pixel value of the
        minimum cut level. The default is 0.0. ``min_percent``
        overrides ``percent``.
    max_percent : float, optional
        The percentile value used to determine the pixel value of the
        maximum cut level. The default is 100.0. ``max_percent``
        overrides ``percent``.
    percent : float, optional
        The percentage of the image values used to determine the pixel
        values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile,
        while the upper cut level will be set at the ``(100 + percent) / 2``
        percentile. The default is 100.0. ``percent`` is ignored if
        either ``min_percent`` or ``max_percent`` is specified.
cmap : str
The matplotlib color map name. The default is 'Greys_r'.
"""
import matplotlib
import matplotlib.cm as cm
import matplotlib.image as mimg
# __main__ gives ext as a string
try:
ext = int(ext)
except ValueError:
pass
try:
image = getdata(filename, ext)
except Exception as e:
log.critical(e)
return 1
    if image.ndim != 2:
        log.critical('data in FITS extension {0} is not a 2D array'
                     .format(ext))
        return 1
if out_fn is None:
out_fn = os.path.splitext(filename)[0]
if out_fn.endswith('.fits'):
out_fn = os.path.splitext(out_fn)[0]
out_fn += '.png'
# need to explicitly define the output format due to a bug in
# matplotlib (<= 2.1), otherwise the format will always be PNG
out_format = os.path.splitext(out_fn)[1][1:]
# workaround for matplotlib 2.0.0 bug where png images are inverted
# (mpl-#7656)
if (out_format.lower() == 'png' and
LooseVersion(matplotlib.__version__) == LooseVersion('2.0.0')):
image = image[::-1]
if cmap not in cm.datad:
log.critical('{0} is not a valid matplotlib colormap name.'
.format(cmap))
return 1
norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a,
min_cut=min_cut, max_cut=max_cut,
min_percent=min_percent, max_percent=max_percent,
percent=percent)
mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower',
format=out_format)
log.info('Saved file to {0}.'.format(out_fn))
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
description='Create a bitmap file from a FITS image.')
parser.add_argument('-e', '--ext', metavar='hdu', default=0,
help='Specify the HDU extension number or name '
'(Default is 0).')
parser.add_argument('-o', metavar='filename', type=str, default=None,
help='Filename for the output image (Default is a '
'PNG file with the same name as the FITS file).')
parser.add_argument('--stretch', type=str, default='linear',
help='Type of image stretching ("linear", "sqrt", '
'"power", "log", or "asinh") (Default is "linear").')
parser.add_argument('--power', type=float, default=1.0,
help='Power index for "power" stretching (Default is '
'1.0).')
parser.add_argument('--asinh_a', type=float, default=0.1,
help='The value in normalized image where the asinh '
'curve transitions from linear to logarithmic '
'behavior (used only for "asinh" stretch) '
'(Default is 0.1).')
parser.add_argument('--min_cut', type=float, default=None,
help='The pixel value of the minimum cut level '
'(Default is the image minimum).')
parser.add_argument('--max_cut', type=float, default=None,
help='The pixel value of the maximum cut level '
'(Default is the image maximum).')
parser.add_argument('--min_percent', type=float, default=None,
help='The percentile value used to determine the '
'minimum cut level (Default is 0).')
parser.add_argument('--max_percent', type=float, default=None,
help='The percentile value used to determine the '
'maximum cut level (Default is 100).')
parser.add_argument('--percent', type=float, default=None,
help='The percentage of the image values used to '
'determine the pixel values of the minimum and '
'maximum cut levels (Default is 100).')
parser.add_argument('--cmap', metavar='colormap_name', type=str,
default='Greys_r', help='matplotlib color map name '
'(Default is "Greys_r").')
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files to convert')
args = parser.parse_args(args)
for filename in args.filename:
fits2bitmap(filename, ext=args.ext, out_fn=args.o,
stretch=args.stretch, min_cut=args.min_cut,
max_cut=args.max_cut, min_percent=args.min_percent,
max_percent=args.max_percent, percent=args.percent,
power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
|
1ff147e575ddb307bd93477df0e2b7d0ea034fecc491cdc098815f79878633f9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs.utils import wcs_to_celestial_frame
from astropy.coordinates import (SkyCoord, frame_transform_graph,
SphericalRepresentation,
UnitSphericalRepresentation,
BaseCoordinateFrame)
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
has_inverse = True
def __init__(self, wcs, slice=None):
super().__init__()
self.wcs = wcs
if self.wcs.wcs.naxis > 2:
if slice is None:
raise ValueError("WCS has more than 2 dimensions, so ``slice`` should be set")
elif len(slice) != self.wcs.wcs.naxis:
raise ValueError("slice should have as many elements as WCS "
"has dimensions (should be {0})".format(self.wcs.wcs.naxis))
else:
self.slice = slice
self.x_index = slice.index('x')
self.y_index = slice.index('y')
else:
self.slice = None
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs == other.wcs
and self.slice == other.slice)
@property
def input_dims(self):
return self.wcs.wcs.naxis
def transform(self, world):
"""
Transform world to pixel coordinates. You should pass in a NxM array
where N is the number of points to transform, and M is the number of
dimensions in the WCS. This then returns the (x, y) pixel coordinates
as a Nx2 array.
"""
if world.shape[1] != self.wcs.wcs.naxis:
raise ValueError("Second dimension of input values should match number of WCS coordinates")
if world.shape[0] == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.wcs_world2pix(world, 1) - 1
if self.slice is None:
return pixel
else:
return pixel[:, (self.x_index, self.y_index)]
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, slice=self.slice)
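# Usage sketch (hedged; assumes `wcs` is a 2-d astropy.wcs.WCS instance):
#   trans = WCSWorld2PixelTransform(wcs)
#   pix = trans.transform(np.array([[10.68, 41.27]]))  # (N, 2) world -> (N, 2) pixel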
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
has_inverse = True
def __init__(self, wcs, slice=None):
super().__init__()
self.wcs = wcs
self.slice = slice
if self.slice is not None:
self.x_index = slice.index('x')
self.y_index = slice.index('y')
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs == other.wcs
and self.slice == other.slice)
@property
def output_dims(self):
return self.wcs.wcs.naxis
def transform(self, pixel):
"""
Transform pixel to world coordinates. You should pass in a Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions in
the WCS
"""
if self.slice is None:
pixel_full = pixel.copy()
else:
pixel_full = []
for index in self.slice:
if index == 'x':
pixel_full.append(pixel[:, 0])
elif index == 'y':
pixel_full.append(pixel[:, 1])
else:
pixel_full.append(index)
pixel_full = np.array(np.broadcast_arrays(*pixel_full)).transpose()
pixel_full += 1
if pixel_full.shape[0] == 0:
world = np.zeros((0, 2))
else:
world = self.wcs.wcs_pix2world(pixel_full, 1)
# At the moment, one has to manually check that the transformation
# round-trips, otherwise it should be considered invalid.
pixel_check = self.wcs.wcs_world2pix(world, 1)
with np.errstate(invalid='ignore'):
invalid = np.any(np.abs(pixel_check - pixel_full) > 1., axis=1)
world[invalid] = np.nan
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, slice=self.slice)
class CoordinateTransform(CurvedTransform):
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, WCS):
self.input_system = wcs_to_celestial_frame(self._input_system_name)
elif isinstance(self._input_system_name, str):
self.input_system = frame_transform_graph.lookup_name(self._input_system_name)
if self.input_system is None:
raise ValueError("Frame {0} not found".format(self._input_system_name))
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")
if isinstance(self._output_system_name, WCS):
self.output_system = wcs_to_celestial_frame(self._output_system_name)
elif isinstance(self._output_system_name, str):
self.output_system = frame_transform_graph.lookup_name(self._output_system_name)
if self.output_system is None:
raise ValueError("Frame {0} not found".format(self._output_system_name))
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")
if self.output_system == self.input_system:
self.same_frames = True
else:
self.same_frames = False
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
input_coords = input_coords*u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in),
frame=self.input_system)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all='ignore'):
c_out = c_in.transform_to(self.output_system)
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
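# Usage sketch (hedged; 'fk5' and 'galactic' are standard frame names):
#   trans = CoordinateTransform('fk5', 'galactic')
#   out = trans.transform(np.array([[10.68, 41.27]]))  # (N, 2) degrees in and out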
|
ddd1154ad1e1bd668bd071134b03d19949271c9e2347ca0ec461271547de7e57 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from matplotlib import rcParams
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
__all__ = ['CoordinateHelper']
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {'-': 'solid',
'--': 'dashed',
'-.': 'dashdot',
':': 'dotted',
'none': 'none',
'None': 'none',
' ': 'none',
'': 'none'}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid='ignore'):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
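# Worked example (hedged): values wrap into [coord_wrap - 360, coord_wrap), so
#   wrap_angle_at(np.array([359., 1., 270.]), 180.) -> array([ -1.,   1., -90.])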
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(self, parent_axes=None, parent_map=None, transform=None,
coord_index=None, coord_type='scalar', coord_unit=None,
coord_wrap=None, frame=None, format_unit=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self.format_unit = format_unit
self.frame = frame
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
self.ticks.display_minor_ticks(rcParams['xtick.minor.visible'])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {'visible': False,
'facecolor': 'none',
'edgecolor': rcParams['grid.color'],
'linestyle': LINES_TO_PATCHES_LINESTYLE[rcParams['grid.linestyle']],
'linewidth': rcParams['grid.linewidth'],
'alpha': rcParams['grid.alpha'],
'transform': self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == 'lines' and not self.transform.has_inverse:
raise ValueError('The specified transform has no inverse, so the '
'grid cannot be drawn using grid_type=\'lines\'')
if grid_type is None:
grid_type = 'lines' if self.transform.has_inverse else 'contours'
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs['visible']:
if not draw_grid:
self.grid_lines_kwargs['visible'] = False
else:
self.grid_lines_kwargs['visible'] = True
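    # Usage sketch (hedged; `ax` is a hypothetical WCSAxes instance):
    #   ax.coords[0].grid(color='white', alpha=0.5, grid_type='contours')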
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported '
'for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ['longitude', 'latitude']:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(unit=self.coord_unit,
format_unit=self.format_unit)
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or Formatter
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def format_coord(self, value, format='auto'):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathTex. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == 'longitude':
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
        unit : :class:`~astropy.units.Unit`
            The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def set_ticks(self, values=None, spacing=None, number=None, size=None,
width=None, color=None, alpha=None, direction=None,
exclude_overlapping=None):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ('in', 'out'):
self.ticks.set_tick_out(direction == 'out')
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn("exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
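    # Usage sketch (hedged; `ax` is a hypothetical WCSAxes instance):
    #   ax.coords[0].set_ticks(spacing=30 * u.arcmin, color='red', direction='in')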
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
            The visibility of ticks. Setting to ``False`` will hide ticks
            along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(self, color=None, size=None, pad=None,
exclude_overlapping=None, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
        visible : bool
            The visibility of tick labels. Setting to ``False`` will hide
            this coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop('fontdict', None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
    def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group('grid lines')
self._update_ticks()
if self.grid_lines_kwargs['visible']:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('grid lines')
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox, ticks_locs):
renderer.open_group('ticks')
self.ticks.draw(renderer, ticks_locs)
self.ticklabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size)
renderer.close_group('ticks')
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, ticks_locs, visible_ticks):
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=ticks_locs,
visible_ticks=visible_ticks)
renderer.close_group('axis labels')
def _update_ticks(self):
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index])
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# http://matplotlib.org/users/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
            world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
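            # (dx, dy) was the display-space gradient of this coordinate; the
            # 90-degree rotation makes it point along the line of constant
            # coordinate (this coordinate's grid line), which is the direction
            # a tick should follow.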
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
with np.errstate(invalid='ignore'):
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid='ignore'):
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1,
w2, tick_angle, ticks='minor')
# format tick labels, add to scene
text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2,
tick_angle, ticks='major'):
if self.coord_type == 'longitude':
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack([tick_world_coordinates_values,
tick_world_coordinates_values + 360])
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid='ignore'):
intersections = np.hstack([np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0]])
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == 'major':
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
self.lbl_world.append(world)
else:
self.ticks.add_minor(minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self.minor_frequency = frequency
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples)
else:
xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset]))
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == 'scalar':
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _update_grid_contour(self):
if hasattr(self, '_grid') and self._grid:
for line in self._grid.collections:
line.remove()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res),
np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid)
# Replace wraps by NaN
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values))
else:
self._grid = None
def tick_params(self, which='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : string, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if 'colors' in kwargs:
if 'color' not in kwargs:
kwargs['color'] = kwargs['colors']
if 'labelcolor' not in kwargs:
kwargs['labelcolor'] = kwargs['colors']
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == 'minor':
if len(set(kwargs) - {'length'}) > 0:
raise ValueError("When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)")
else:
if 'length' in kwargs:
self.ticks.set_minor_ticksize(kwargs['length'])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(size=kwargs.get('length'),
width=kwargs.get('width'),
color=kwargs.get('color'),
direction=kwargs.get('direction'))
# Set the tick position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if arg in kwargs and position is None:
position = ''
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(color=kwargs.get('labelcolor'),
size=kwargs.get('labelsize'),
pad=kwargs.get('pad'))
# Set the tick label position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if 'label' + arg in kwargs and position is None:
position = ''
if kwargs.get('label' + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if 'grid_color' in kwargs:
self.grid_lines_kwargs['edgecolor'] = kwargs['grid_color']
if 'grid_alpha' in kwargs:
self.grid_lines_kwargs['alpha'] = kwargs['grid_alpha']
if 'grid_linewidth' in kwargs:
self.grid_lines_kwargs['linewidth'] = kwargs['grid_linewidth']
if 'grid_linestyle' in kwargs:
if kwargs['grid_linestyle'] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs['linestyle'] = LINES_TO_PATCHES_LINESTYLE[kwargs['grid_linestyle']]
else:
self.grid_lines_kwargs['linestyle'] = kwargs['grid_linestyle']
|
1c780b0d206f51a4c62945876cf00aac72b2f665e7aa260333acca581979aced | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.units import UnitsError
from astropy.coordinates import Angle
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit('custom_degree', represents=u.degree,
format={'generic': '\xb0',
'latex': r'^\circ',
'unicode': '°'}),
u.arcmin: u.def_unit('custom_arcmin', represents=u.arcmin,
format={'generic': "'",
'latex': r'^\prime',
'unicode': '′'}),
u.arcsec: u.def_unit('custom_arcsec', represents=u.arcsec,
format={'generic': '"',
'latex': r'^{\prime\prime}',
'unicode': '″'}),
u.hourangle: u.def_unit('custom_hourangle', represents=u.hourangle,
format={'generic': 'h',
'latex': r'^\mathrm{h}',
'unicode': r'$\mathregular{^h}$'})}
class BaseFormatterLocator:
"""
    A joint formatter/locator base class.
"""
def __init__(self, values=None, number=None, spacing=None, format=None):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specifed")
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError("value should be in units compatible with "
"coordinate units ({0}) but found {1}".format(self._unit, values.unit))
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
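    # Worked example (illustrative): with spacing=1 deg and frequency=5, the
    # candidate minor ticks fall every 0.2 deg; entries whose multiple index
    # is divisible by 5 coincide with major ticks, so every fifth entry
    # (starting from the first such coincidence) is deleted above, leaving
    # only the intermediate minor tick positions.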
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
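# Worked example (illustrative): _locate_values returns the integer multiples
# of ``spacing`` that fall inside [value_min, value_max]. For value_min=0.3,
# value_max=2.7 and spacing=0.5, imin=ceil(0.6)=1 and imax=floor(5.4)=5, so
# the returned multiples are [1, 2, 3, 4, 5], corresponding to tick values
# 0.5, 1.0, 1.5, 2.0 and 2.5.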
class AngleFormatterLocator(BaseFormatterLocator):
"""
    A joint formatter/locator for angular coordinates.
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, decimal=None, format_unit=None, show_decimal_unit=True):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
self._unit = unit
self._format_unit = format_unit or unit
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(values=values, number=number, spacing=spacing,
format=format)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (not isinstance(spacing, u.Quantity) or
spacing.unit.physical_type != 'angle'):
raise TypeError("spacing should be an astropy.units.Quantity "
"instance with units of angle")
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
else:
raise ValueError("Invalid format: {0}".format(value))
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
if self.decimal:
spacing = self._format_unit / (10. ** self._precision)
else:
if self._fields == 1:
spacing = 1. * u.degree
elif self._fields == 2:
spacing = 1. * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1. * u.arcsec
else:
spacing = u.arcsec / (10. ** self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
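    # Worked example (illustrative): with format 'dd:mm:ss.s' we have
    # fields=3 and precision=1, so base_spacing is 0.1 arcsec; the ``format``
    # setter above resets any user-supplied spacing smaller than this, since
    # the format could not represent it.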
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
def formatter(self, values, spacing, format='auto'):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
spacing = spacing.to_value(unit)
fields = 0
precision = len("{0:.10f}".format(spacing).replace('0', ' ').strip().split('.', 1)[1])
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == 'latex' or (format == 'auto' and rcParams['text.usetex'])
if decimal:
# At the moment, the Angle class doesn't have a consistent way
# to always convert angles to strings in decimal form with
# symbols for units (instead of e.g 3arcsec). So as a workaround
# we take advantage of the fact that Angle.to_string converts
# the unit to a string manually when decimal=False and the unit
# is not strictly u.degree or u.hourangle
if self.show_decimal_unit:
decimal = False
sep = 'fromunit'
if is_latex:
fmt = 'latex'
else:
if unit is u.hourangle:
fmt = 'unicode'
else:
fmt = None
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = None
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = 'fromunit'
if unit == u.degree:
if is_latex:
fmt = 'latex'
else:
sep = ('\xb0', "'", '"')
fmt = None
else:
if format == 'ascii':
fmt = None
elif is_latex:
fmt = 'latex'
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
fmt = None
angles = Angle(values)
string = angles.to_string(unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt).tolist()
return string
else:
return []
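# Illustrative usage sketch (an assumption, not a definitive API example):
#
#     fl = AngleFormatterLocator(number=4, format='dd:mm')
#     values, spacing = fl.locator(value_min=10., value_max=20.)
#     labels = fl.formatter(values, spacing)
#
# Here ``values`` are tick positions in degrees, spaced by a multiple of one
# arcminute (the smallest interval the 'dd:mm' format can represent), and
# ``labels`` are the corresponding formatted strings.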
class ScalarFormatterLocator(BaseFormatterLocator):
"""
    A joint formatter/locator for scalar (non-angular) coordinates.
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
if unit is not None:
self._unit = unit
self._format_unit = format_unit or unit
elif spacing is not None:
self._unit = spacing.unit
self._format_unit = format_unit or spacing.unit
elif values is not None:
self._unit = values.unit
self._format_unit = format_unit or values.unit
super().__init__(values=values, number=number, spacing=spacing,
format=format)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith('%'):
raise ValueError("Invalid format: {0}".format(value))
@property
def base_spacing(self):
return self._format_unit / (10. ** self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format='auto'):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith('%'):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
else:
return []
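# Illustrative usage sketch (an assumption, not part of this module): the
# scalar version works the same way but with plain units, e.g.:
#
#     from astropy import units as u
#     fl = ScalarFormatterLocator(unit=u.m, format='x.xx')
#     values, spacing = fl.locator(value_min=0., value_max=1.)
#     labels = fl.formatter(values, spacing)   # e.g. ['0.00', '0.20', ...]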
|
3eab0c700a44a0f919e87179c1d1dddad64cafa424bc9a2be98acdca458632a2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
from astropy.coordinates import SkyCoord, BaseCoordinateFrame
from astropy.wcs import WCS
from astropy.wcs.utils import wcs_to_celestial_frame
from .transforms import (WCSPixel2WorldTransform, WCSWorld2PixelTransform,
CoordinateTransform)
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta, transform_contour_set_inplace
from .frame import EllipticalFrame, RectangularFrame
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=RectangularFrame,
**kwargs):
super().__init__(fig, rect, **kwargs)
self._bboxes = []
self.frame_class = frame_class
        if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return "%s %s (pixel)" % (x, y)
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
xw = coords[self._x_index].format_coord(world[self._x_index], format='ascii')
yw = coords[self._y_index].format_coord(world[self._y_index], format='ascii')
if self._display_coords_index == 0:
system = "world"
else:
system = "world, overlay {0}".format(self._display_coords_index)
coord_string = "%s %s (%s)" % (xw, yw, system)
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop('origin', 'lower')
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = 'lower'
elif origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
            # We don't need to worry since PIL is not installed, so the user
            # cannot have passed an RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop('transform', None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop('transform', None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
        `matplotlib.axes.Axes.plot`. If not specified, a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.data.lon.to_value(coord.coord_unit))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.data.lat.to_value(coord.coord_unit))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
if 'transform' in kwargs.keys():
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(frame0)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
super().plot(*args, **kwargs)
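    # Illustrative usage sketch (an assumption): plotting a SkyCoord onto a
    # celestial WCSAxes instance ``ax``:
    #
    #     from astropy import units as u
    #     from astropy.coordinates import SkyCoord
    #     c = SkyCoord(ra=[10, 20] * u.deg, dec=[30, 40] * u.deg)
    #     ax.plot_coord(c, 'o')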
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
wcs.wcs.set()
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
self.coords = CoordinatesMap(self, wcs=self.wcs, slice=slices,
transform=transform, coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
if slices is None:
self.slices = ('x', 'y')
self._x_index = 0
self._y_index = 1
else:
self.slices = slices
self._x_index = self.slices.index('x')
self._y_index = self.slices.index('y')
# Common default settings for Rectangular Frame
if self.frame_class is RectangularFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('b')
self.coords[coord_index].set_ticklabel_position('b')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_axislabel_position('l')
self.coords[coord_index].set_ticklabel_position('l')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
# Common default settings for Elliptical Frame
elif self.frame_class is EllipticalFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('h')
self.coords[coord_index].set_ticklabel_position('h')
self.coords[coord_index].set_ticks_position('h')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_ticks_position('c')
self.coords[coord_index].set_axislabel_position('c')
self.coords[coord_index].set_ticklabel_position('c')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
if rcParams['axes.grid']:
self.grid()
def draw_wcsaxes(self, renderer):
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
ticks_locs = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
ticks_locs=ticks_locs[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
ticks_locs=ticks_locs[coord],
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, inframe=False):
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
# We need to make sure that that frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, inframe=inframe)
self._drawn = True
# MATPLOTLIB_LT_30: The ``kwargs.pop('label', None)`` is to ensure
# compatibility with Matplotlib 2.x (which has label) and 3.x (which has
# xlabel). While these are meant to be a single positional argument,
# Matplotlib internally sometimes specifies e.g. set_xlabel(xlabel=...).
def set_xlabel(self, xlabel=None, labelpad=1, **kwargs):
if xlabel is None:
xlabel = kwargs.pop('label', None)
if xlabel is None:
raise TypeError("set_xlabel() missing 1 required positional argument: 'xlabel'")
self.coords[self._x_index].set_axislabel(xlabel, minpad=labelpad, **kwargs)
def set_ylabel(self, ylabel=None, labelpad=1, **kwargs):
if ylabel is None:
ylabel = kwargs.pop('label', None)
if ylabel is None:
raise TypeError("set_ylabel() missing 1 required positional argument: 'ylabel'")
self.coords[self._y_index].set_axislabel(ylabel, minpad=labelpad, **kwargs)
def get_xlabel(self):
return self.coords[self._x_index].get_axislabel()
def get_ylabel(self):
return self.coords[self._y_index].get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
coords = CoordinatesMap(self, frame, frame_class=self.frame_class)
else:
if coord_meta is None:
coord_meta = get_coord_meta(frame)
transform = self._get_transform_no_transdata(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
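    # Illustrative usage sketch (an assumption): overplotting data defined in
    # another celestial frame, where ``ra_deg`` and ``dec_deg`` are
    # hypothetical arrays of FK5 coordinates in degrees:
    #
    #     ax.scatter(ra_deg, dec_deg, transform=ax.get_transform('fk5'))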
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if self.wcs is None and frame != 'pixel':
raise ValueError('No WCS specified, so only pixel coordinates are available')
if isinstance(frame, WCS):
coord_in = wcs_to_celestial_frame(self.wcs)
coord_out = wcs_to_celestial_frame(frame)
if coord_in == coord_out:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
WCSWorld2PixelTransform(frame))
else:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
CoordinateTransform(self.wcs, frame) +
WCSWorld2PixelTransform(frame))
elif frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
return pixel2world + frame
else:
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
if frame == 'world':
return pixel2world
else:
coordinate_transform = CoordinateTransform(self.wcs, frame)
if coordinate_transform.same_frames:
return pixel2world
else:
return pixel2world + CoordinateTransform(self.wcs, frame)
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x.
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
def tick_params(self, axis='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
            Tick color (accepts any valid Matplotlib color).
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
            Tick label color (accepts any valid Matplotlib color).
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : string, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, 'coords'):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == 'both':
for pos in ('bottom', 'left', 'top', 'right'):
if pos in kwargs:
raise ValueError("Cannot specify {0}= when axis='both'".format(pos))
if 'label' + pos in kwargs:
raise ValueError("Cannot specify label{0}= when axis='both'".format(pos))
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ('x', 'y'):
if self.frame_class is RectangularFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == axis:
self.coords[coord_index].tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
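# Illustrative usage sketch (an assumption, not part of this module): WCSAxes
# is normally created through Matplotlib's projection machinery, e.g.:
#
#     import matplotlib.pyplot as plt
#     from astropy.io import fits
#     from astropy.wcs import WCS
#     wcs = WCS(fits.getheader('image.fits'))   # hypothetical FITS file
#     ax = plt.subplot(projection=wcs)
#     ax.coords.grid(color='white', linestyle=':')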
|
f8b9857ece65261c96baa119d87a867c7d9eddee281684653a5ebfabe668b9a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
    # Sort the elements of X using the corresponding elements of Y as keys.
    return [x for (y, x) in sorted(zip(Y, X))]
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_pad(rcParams['xtick.major.pad'])
self._exclude_overlapping = False
# Check rcParams
if 'color' not in kwargs:
self.set_color(rcParams['xtick.color'])
if 'size' not in kwargs:
self.set_size(rcParams['xtick.labelsize'])
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
def set_pad(self, value):
self._pad = value
def get_pad(self):
return self._pad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def draw(self, renderer, bboxes, ticklabels_bbox, tick_out_size):
if not self.get_visible():
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
self.set_text(self.text[axis][i])
x, y = self.pixel[axis][i]
pad = renderer.points_to_pixels(self.get_pad() + tick_out_size)
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = -pad
dy = -text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = -text_size - pad
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = pad
dy = -text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = pad
self.set_position((x + dx, y + dy))
self.set_ha(ha)
self.set_va(va)
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * pad
dy += ddy * pad
self.set_position((x - dx, y - dy))
self.set_ha('center')
self.set_va('center')
bb = super().get_window_extent(renderer)
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
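# Worked example (illustrative) of simplify_labels above: for consecutive
# labels '13d14m15s' and '13d14m30s', the common leading part up to the last
# non-numeric character ('13d14m') is stripped from the second label, which
# is then drawn simply as '30s'. The final field is always kept, even when
# two labels are identical, which is why the comparison loop stops one
# character before the end.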
|
58aef50704fde51635359a12a7f2d9860edd980c46c0574ff72871d212fac619 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path, Line2D
from matplotlib.transforms import Affine2D
from matplotlib import rcParams
class Ticks(Line2D):
"""
    Ticks are derived from Line2D, and the ticks themselves are drawn as
    markers. Thus, you should use marker methods such as set_mec, set_mew,
    etc. to style them.
    To change the tick size (length), you need to use set_ticksize. To
    change the direction of the ticks (ticks point in the opposite direction
    to the tick labels by default), use set_tick_out(False).
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
"""
def __init__(self, ticksize=None, tick_out=None, **kwargs):
if ticksize is None:
ticksize = rcParams['xtick.major.size']
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams['xtick.minor.size'])
self.set_tick_out(rcParams['xtick.direction'] == 'out')
self.clear()
line2d_kwargs = {'color': rcParams['xtick.color'],
'linewidth': rcParams['xtick.major.width']}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.], [0.], **line2d_kwargs)
self.set_visible_axes('all')
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
        Set to True if the ticks need to be rotated by 180 degrees.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
        Return True if the ticks will be rotated by 180 degrees.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
        Set the length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
        Return the length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
        Set the length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
        Return the length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.disp = {}
self.minor_world = {}
self.minor_pixel = {}
self.minor_angle = {}
self.minor_disp = {}
def add(self, axis, world, pixel, angle, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(self, minor_axis, minor_world, minor_pixel, minor_angle,
minor_axis_displacement):
if minor_axis not in self.minor_world:
self.minor_world[minor_axis] = [minor_world]
self.minor_pixel[minor_axis] = [minor_pixel]
self.minor_angle[minor_axis] = [minor_angle]
self.minor_disp[minor_axis] = [minor_axis_displacement]
else:
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
    # Unit-length tick template path; it is scaled and rotated for each tick
    # when it is drawn.
    _tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer, ticks_locs):
"""
Draw the ticks.
"""
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset, ticks_locs)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset, ticks_locs)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset, ticks_locs):
"""
        Draw ticks at the given pixel positions (used for both major and
        minor ticks).
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180. if self.get_tick_out() else 0.
for axis in self.get_visible_axes():
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
ticks_locs[axis].append(locs)
gc.restore()
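# Illustrative usage sketch (an assumption): a Ticks instance can be styled
# with its Line2D-derived API, e.g.:
#
#     ticks = Ticks(ticksize=8, color='white')
#     ticks.set_tick_out(True)           # draw ticks pointing outwards
#     ticks.display_minor_ticks(True)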
|
5b5df07ae523e20590527f97d3a7ac3a0d20619fc8ad349191fa66f71d1ca7f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
__all__ = ['SphericalCircle']
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
m2 = rotation_matrix(-lon0, axis='z')
transform_matrix = matrix_product(m2, m1)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
    coordinates on a sphere. Here we assume that latitude goes from -90 to +90 degrees.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity`
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
radius : `~astropy.units.Quantity`
        The radius of the circle.
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
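# Illustrative usage sketch (an assumption): adding a spherical circle to a
# celestial WCSAxes instance ``ax``:
#
#     from astropy import units as u
#     circle = SphericalCircle((30. * u.deg, 45. * u.deg), 2. * u.deg,
#                              edgecolor='white', facecolor='none',
#                              transform=ax.get_transform('fk5'))
#     ax.add_patch(circle)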
|
9bd69290e595124e5d9e03e2bcfe3889f44c94957a635cf780f77c9ed70c26a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .coordinate_helpers import CoordinateHelper
from .transforms import WCSPixel2WorldTransform
from .utils import coord_type_from_ctype
from .frame import RectangularFrame
from .coordinate_range import find_coordinate_range
class CoordinatesMap:
"""
A container for coordinate helpers that represents a coordinate system.
This object can be used to access coordinate helpers by index (like a list)
or by name (like a dictionary).
Parameters
----------
axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate map belongs to.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
slice : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
previous_frame_path : `~matplotlib.path.Path`, optional
When changing the WCS of the axes, the frame instance will change but
we might want to keep re-using the same underlying matplotlib
`~matplotlib.path.Path` - in that case, this can be passed to this
keyword argument.
"""
def __init__(self, axes, wcs=None, transform=None, coord_meta=None,
slice=None, frame_class=RectangularFrame,
previous_frame_path=None):
# Keep track of parent axes and WCS
self._axes = axes
if wcs is None:
if transform is None:
raise ValueError("Either `wcs` or `transform` are required")
if coord_meta is None:
raise ValueError("`coord_meta` is required when "
"`transform` is passed")
self._transform = transform
naxis = 2
else:
if transform is not None:
raise ValueError("Cannot specify both `wcs` and `transform`")
if coord_meta is not None:
raise ValueError("Cannot pass `coord_meta` if passing `wcs`")
self._transform = WCSPixel2WorldTransform(wcs, slice=slice)
naxis = wcs.wcs.naxis
self.frame = frame_class(axes, self._transform, path=previous_frame_path)
# Set up coordinates
self._coords = []
self._aliases = {}
for coord_index in range(naxis):
# Extract coordinate metadata from WCS object or transform
if wcs is not None:
coord_unit = wcs.wcs.cunit[coord_index]
coord_type, format_unit, coord_wrap = coord_type_from_ctype(wcs.wcs.ctype[coord_index])
name = wcs.wcs.ctype[coord_index][:4].replace('-', '')
else:
try:
coord_type = coord_meta['type'][coord_index]
coord_wrap = coord_meta['wrap'][coord_index]
coord_unit = coord_meta['unit'][coord_index]
name = coord_meta['name'][coord_index]
if 'format_unit' in coord_meta:
format_unit = coord_meta['format_unit'][coord_index]
else:
format_unit = None
except IndexError:
raise ValueError("coord_meta items should have a length of {0}".format(len(wcs.wcs.naxis)))
self._coords.append(CoordinateHelper(parent_axes=axes,
parent_map=self,
transform=self._transform,
coord_index=coord_index,
coord_type=coord_type,
coord_wrap=coord_wrap,
coord_unit=coord_unit,
format_unit=format_unit,
frame=self.frame))
# Set up aliases for coordinates
self._aliases[name.lower()] = coord_index
def __getitem__(self, item):
if isinstance(item, str):
return self._coords[self._aliases[item.lower()]]
else:
return self._coords[item]
def __contains__(self, item):
if isinstance(item, str):
return item.lower() in self._aliases
else:
return 0 <= item < len(self._coords)
def set_visible(self, visibility):
raise NotImplementedError()
def __iter__(self):
for coord in self._coords:
yield coord
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : { 'lines' | 'contours' }
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
for coord in self:
coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)
def get_coord_range(self):
xmin, xmax = self._axes.get_xlim()
ymin, ymax = self._axes.get_ylim()
return find_coordinate_range(self._transform,
[xmin, xmax, ymin, ymax],
[coord.coord_type for coord in self],
[coord.coord_unit for coord in self])
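# Illustrative sketch (not part of the module): assuming a WCSAxes created
# with a celestial WCS whose ctypes yield the names 'ra' and 'dec', the map
# can be indexed either way:
#
#     import matplotlib.pyplot as plt
#     ax = plt.subplot(projection=wcs)   # wcs: an astropy.wcs.WCS instance
#     lon = ax.coords[0]                 # access by index
#     lon = ax.coords['ra']              # or by lowercase ctype-derived name
#     assert 'dec' in ax.coords
#     ax.coords.grid(color='white', linestyle='dotted')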
|
3208bf678bb98e05a294ddde821fb2e31d2b45db0a16c732034f0662425f66b7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import BaseCoordinateFrame
__all__ = ['select_step_degree', 'select_step_hour', 'select_step_scalar',
'coord_type_from_ctype', 'transform_contour_set_inplace']
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1. * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_,
minute_limits_,
degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
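# Worked example (sketch, not part of the module): a requested spacing of
# 0.7 deg falls in the arcminute block of the limits table (the first limit
# >= 0.7 deg is 45/60 = 0.75 deg), whose associated step is 30, so
# select_step_degree(0.7 * u.deg) returns 30 arcmin.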
def select_step_hour(dv):
if dv > 15. * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [15. * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [15. * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_,
minute_limits_,
hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15. * u.arcsec)) * (15. * u.arcsec)
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10. ** (base + steps[imin])
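# Worked example (sketch): for dv = 0.7, log10(0.7) ~ -0.155, so base = -1
# and frac ~ 0.845; the closest of log10([1, 2, 5, 10]) is log10(5) ~ 0.699,
# hence select_step_scalar(0.7) returns 10 ** (-1 + 0.699) = 0.5.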
def get_coord_meta(frame):
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (None, None)
coord_meta['unit'] = (u.deg, u.deg)
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError("Unknown frame: {0}".format(initial_frame))
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta['name'] = names[:2]
return coord_meta
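# Example (sketch): get_coord_meta('galactic') looks up the Galactic frame
# and returns {'type': ('longitude', 'latitude'), 'wrap': (None, None),
# 'unit': (u.deg, u.deg), 'name': ['l', 'b']}, since 'l' and 'b' are the
# frame's first two representation component names.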
def coord_type_from_ctype(ctype):
"""
Determine whether a particular WCS ctype corresponds to an angle or scalar
coordinate.
"""
if ctype[:4] == 'RA--':
return 'longitude', u.hourangle, None
elif ctype[:4] == 'HPLN':
return 'longitude', u.arcsec, 180.
elif ctype[:4] == 'HPLT':
return 'latitude', u.arcsec, None
elif ctype[:4] == 'HGLN':
return 'longitude', None, 180.
elif ctype[1:4] == 'LON' or ctype[2:4] == 'LN':
return 'longitude', None, None
elif ctype[:4] == 'DEC-' or ctype[1:4] == 'LAT' or ctype[2:4] == 'LT':
return 'latitude', None, None
else:
return 'scalar', None, None
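# Examples (sketch): coord_type_from_ctype('RA---TAN') gives
# ('longitude', u.hourangle, None); 'GLON-CAR' matches the generic
# ctype[1:4] == 'LON' rule and gives ('longitude', None, None); an
# unrecognized ctype such as 'VELO-LSR' falls through to
# ('scalar', None, None).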
def transform_contour_set_inplace(cset, transform):
"""
Transform a contour set in-place using a specified
:class:`matplotlib.transform.Transform`
Using transforms with the native Matplotlib contour/contourf can be slow if
the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
"""
# The contours are represented as paths grouped into levels. Each can have
# one or more paths. The approach we take here is to stack the vertices of
# all paths and transform them in one go. The pos_level list helps us keep
# track of where the set of segments for each overall contour level ends.
    # The pos_segments list helps us keep track of where each segment ends for
    # each contour level.
all_paths = []
pos_level = []
pos_segments = []
for collection in cset.collections:
paths = collection.get_paths()
if len(paths) == 0:
continue
all_paths.append(paths)
# The last item in pos isn't needed for np.split and in fact causes
# issues if we keep it because it will cause an extra empty array to be
# returned.
pos = np.cumsum([len(x) for x in paths])
pos_segments.append(pos[:-1])
pos_level.append(pos[-1])
# As above the last item isn't needed
pos_level = np.cumsum(pos_level)[:-1]
# Stack all the segments into a single (n, 2) array
vertices = [path.vertices for paths in all_paths for path in paths]
if len(vertices) > 0:
vertices = np.concatenate(vertices)
else:
return
# Transform all coordinates in one go
vertices = transform.transform(vertices)
# Split up into levels again
vertices = np.split(vertices, pos_level)
# Now re-populate the segments in the line collections
for ilevel, vert in enumerate(vertices):
vert = np.split(vert, pos_segments[ilevel])
for iseg, ivert in enumerate(vert):
all_paths[ilevel][iseg].vertices = ivert
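# Worked example of the bookkeeping above (sketch): two contour levels, the
# first with paths of lengths [3, 2] and the second with one path of length
# 4, give pos_segments = [[3], []] and pos_level = cumsum([5, 4])[:-1] = [5].
# The 9 stacked vertices are split at [5] into per-level chunks, and the
# first chunk is split again at [3] to recover the two original paths.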
|
849c617a18388436cf7d6ebbc4dda720258c6d744b7ff3d62b9d98a2f8ff76cf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
import matplotlib.transforms as mtransforms
from .frame import RectangularFrame
class AxisLabels(Text):
def __init__(self, frame, minpad=1, *args, **kwargs):
# Use rcParams if the following parameters were not specified explicitly
if 'weight' not in kwargs:
kwargs['weight'] = rcParams['axes.labelweight']
if 'size' not in kwargs:
kwargs['size'] = rcParams['axes.labelsize']
if 'color' not in kwargs:
kwargs['color'] = rcParams['axes.labelcolor']
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_ha('center')
self.set_va('center')
self._minpad = minpad
self._visibility_rule = 'labels'
def get_minpad(self, axis):
try:
return self._minpad[axis]
except TypeError:
return self._minpad
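    # Note (sketch): minpad may be a single scalar applied to every axis, or a
    # mapping keyed by spine name with an entry for every visible axis, e.g.
    # {'b': 2, 'l': 1}; get_minpad falls back to the scalar whenever indexing
    # raises TypeError.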
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self._frame.keys()
else:
return [x for x in self._visible_axes if x in self._frame]
def set_minpad(self, minpad):
self._minpad = minpad
def set_visibility_rule(self, value):
allowed = ['always', 'labels', 'ticks']
if value not in allowed:
raise ValueError("Axis label visibility rule must be one of{}".format(' / '.join(allowed)))
self._visibility_rule = value
def get_visibility_rule(self):
return self._visibility_rule
def draw(self, renderer, bboxes, ticklabels_bbox,
coord_ticklabels_bbox, ticks_locs, visible_ticks):
if not self.get_visible():
return
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
# Flatten the bboxes for all coords and all axes
ticklabels_bbox_list = []
for bbcoord in ticklabels_bbox.values():
for bbaxis in bbcoord.values():
ticklabels_bbox_list += bbaxis
if self.get_visibility_rule() == 'ticks':
if not ticks_locs[axis]:
continue
elif self.get_visibility_rule() == 'labels':
if not coord_ticklabels_bbox:
continue
padding = text_size * self.get_minpad(axis)
# Find position of the axis label. For now we pick the mid-point
# along the path but in future we could allow this to be a
# parameter.
x_disp, y_disp = self._frame[axis].pixel[:, 0], self._frame[axis].pixel[:, 1]
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self._frame[axis].normal_angle[imin] + 180.
label_angle = (normal_angle - 90.) % 360.
if 135 < label_angle < 225:
label_angle += 180
self.set_rotation(label_angle)
            # Find the label position by looking at the bounding box of the
            # tick labels and the image. The default padding is one times the
            # axis label font size; this can be changed with the minpad
            # parameter.
if isinstance(self._frame, RectangularFrame):
if len(ticklabels_bbox_list) > 0 and ticklabels_bbox_list[0] is not None:
coord_ticklabels_bbox[axis] = [mtransforms.Bbox.union(ticklabels_bbox_list)]
else:
coord_ticklabels_bbox[axis] = [None]
if axis == 'l':
if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None:
left = coord_ticklabels_bbox[axis][0].xmin
else:
left = xcen
xpos = left - padding
self.set_position((xpos, ycen))
elif axis == 'r':
if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None:
right = coord_ticklabels_bbox[axis][0].x1
else:
right = xcen
xpos = right + padding
self.set_position((xpos, ycen))
elif axis == 'b':
if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None:
bottom = coord_ticklabels_bbox[axis][0].ymin
else:
bottom = ycen
ypos = bottom - padding
self.set_position((xcen, ypos))
elif axis == 't':
if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None:
top = coord_ticklabels_bbox[axis][0].y1
else:
top = ycen
ypos = top + padding
self.set_position((xcen, ypos))
else: # arbitrary axis
dx = np.cos(np.radians(normal_angle)) * (padding + text_size * 1.5)
dy = np.sin(np.radians(normal_angle)) * (padding + text_size * 1.5)
self.set_position((xcen + dx, ycen + dy))
super().draw(renderer)
bb = super().get_window_extent(renderer)
bboxes.append(bb)
|
ce82bc246bdafd37345cfaa35082b2f28971d1655a86a68405f1bddf071da8fe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
# Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta
LONLAT = {'longitude', 'latitude'}
def wrap_180(values):
values_new = values % 360.
with np.errstate(invalid='ignore'):
values_new[values_new > 180.] -= 360
return values_new
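# Example (sketch): wrap_180(np.array([10., 190., 350.])) returns
# array([ 10., -170., -10.]), i.e. values are mapped into (-180, 180].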
def find_coordinate_range(transform, extent, coord_types, coord_units):
"""
Find the range of coordinates to use for ticks/grids
Parameters
----------
transform : func
Function to transform pixel to world coordinates. Should take two
values (the pixel coordinates) and return two values (the world
coordinates).
extent : iterable
The range of the image viewport in pixel coordinates, given as [xmin,
xmax, ymin, ymax].
coord_types : list of str
Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
The units for each coordinate
"""
# Sample coordinates on a NX x NY grid.
from . import conf
nx = ny = conf.coordinate_range_samples
x = np.linspace(extent[0], extent[1], nx + 1)
y = np.linspace(extent[2], extent[3], ny + 1)
xp, yp = np.meshgrid(x, y)
world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
ranges = []
for coord_index, coord_type in enumerate(coord_types):
xw = world[:, coord_index].reshape(xp.shape)
if coord_type in LONLAT:
unit = coord_units[coord_index]
xw = xw * unit.to(u.deg)
# Iron out coordinates along first row
wjump = xw[0, 1:] - xw[0, :-1]
with np.errstate(invalid='ignore'):
reset = np.abs(wjump) > 180.
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.
wjump = 360. * (wjump / 360.).astype(int)
xw[0, 1:][reset] -= wjump[reset]
# Now iron out coordinates along all columns, starting with first row.
wjump = xw[1:] - xw[:1]
with np.errstate(invalid='ignore'):
reset = np.abs(wjump) > 180.
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.
wjump = 360. * (wjump / 360.).astype(int)
xw[1:][reset] -= wjump[reset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min = np.nanmin(xw)
xw_max = np.nanmax(xw)
# Check if range is smaller when normalizing to the range 0 to 360
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(xw % 360.)
xw_max_check = np.nanmax(xw % 360.)
if xw_max_check - xw_min_check <= xw_max - xw_min < 360.:
xw_min = xw_min_check
xw_max = xw_max_check
# Check if range is smaller when normalizing to the range -180 to 180
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(wrap_180(xw))
xw_max_check = np.nanmax(wrap_180(xw))
if xw_max_check - xw_min_check < 360. and xw_max - xw_min >= xw_max_check - xw_min_check:
xw_min = xw_min_check
xw_max = xw_max_check
x_range = xw_max - xw_min
if coord_type == 'longitude':
if x_range > 300.:
xw_min = 0.
xw_max = 360 - np.spacing(360.)
elif xw_min < 0.:
xw_min = max(-180., xw_min - 0.1 * x_range)
xw_max = min(+180., xw_max + 0.1 * x_range)
else:
xw_min = max(0., xw_min - 0.1 * x_range)
xw_max = min(360., xw_max + 0.1 * x_range)
elif coord_type == 'latitude':
xw_min = max(-90., xw_min - 0.1 * x_range)
xw_max = min(+90., xw_max + 0.1 * x_range)
if coord_type in LONLAT:
xw_min *= u.deg.to(unit)
xw_max *= u.deg.to(unit)
ranges.append((xw_min, xw_max))
return ranges
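# Illustration of the normalization above (sketch): longitude samples that
# straddle 0 deg and unwrap to, say, 350..370 deg have a naive range of
# (350, 370); the wrap_180 check finds the equivalent, equally tight range
# (-10, 10) and keeps that instead, so downstream tick locators see
# coordinates near zero rather than near 360.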
|
9e8ed6dd6e2b9810a96533cbb0a9982cb8049df443ec30a2884de01941dc919f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = ['Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame']
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
def __init__(self, parent_axes, transform):
self.parent_axes = parent_axes
self.transform = transform
self.data = None
self.pixel = None
self.world = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.transform.inverted().transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = value
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
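# Example (sketch): for the bottom spine of a rectangular frame the pixel
# trace runs left to right, so dx > 0 and dy == 0, giving
# normal_angle = degrees(arctan2(dx, -0.)) = 90., i.e. the inward-facing
# normal points straight up, as expected.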
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams['axes.linewidth']
self._color = rcParams['axes.edgecolor']
self._path = path
for axis in self.spine_names:
self[axis] = Spine(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return 'lower' if ymin < ymax else 'upper'
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(self._path, transform=self.parent_axes.transData,
facecolor=rcParams['axes.facecolor'], edgecolor='white')
def draw(self, renderer):
for axis in self:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
p = np.linspace(0., 1., data.shape[0])
p_new = np.linspace(0., 1., n_samples)
spines[axis] = Spine(self.parent_axes, self.transform)
spines[axis].data = np.array([np.interp(p_new, p, data[:, 0]),
np.interp(p_new, p, data[:, 1])]).transpose()
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : string
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
@abc.abstractmethod
def update_spines(self):
raise NotImplementedError("")
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'brtl'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['r'].data = np.array(([xmax, ymin], [xmax, ymax]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
self['l'].data = np.array(([xmin, ymax], [xmin, ymin]))
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = 'chv'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0., 2 * np.pi, 1000)
self['c'].data = np.array([xmid + dx * np.cos(theta),
ymid + dy * np.sin(theta)]).transpose()
self['h'].data = np.array([np.linspace(xmin, xmax, 1000),
np.repeat(ymid, 1000)]).transpose()
self['v'].data = np.array([np.repeat(xmid, 1000),
np.linspace(ymin, ymax, 1000)]).transpose()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self['c'].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = 'c'
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
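# Sketch of a custom frame (an assumption for illustration, not part of this
# module): a subclass only needs to declare its spine names and fill in their
# data vertices.
#
#     class DiagonalFrame(BaseFrame):
#         spine_names = 'd'
#
#         def update_spines(self):
#             xmin, xmax = self.parent_axes.get_xlim()
#             ymin, ymax = self.parent_axes.get_ylim()
#             self['d'].data = np.array([[xmin, ymin], [xmax, ymax]])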
|
40a0583c519dc5613a5598e64d735755880d3f063f1091139ce4b6ed86e763a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path
from astropy.coordinates.angle_utilities import angular_separation
# Tolerance for WCS round-tripping
ROUND_TRIP_TOL = 1e-1
# Tolerance for discontinuities relative to the median
DISCONT_FACTOR = 10.
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
"""
Draw a curve, taking into account discontinuities.
Parameters
----------
lon_lat : `~numpy.ndarray`
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : `~numpy.ndarray`
The pixel coordinates corresponding to ``lon_lat``
lon_lat_check : `~numpy.ndarray`
The world coordinates derived from converting from ``pixel``, which is
used to ensure round-tripping.
"""
# In some spherical projections, some parts of the curve are 'behind' or
# 'in front of' the plane of the image, so we find those by reversing the
# transformation and finding points where the result is not consistent.
sep = angular_separation(np.radians(lon_lat[:, 0]),
np.radians(lon_lat[:, 1]),
np.radians(lon_lat_check[:, 0]),
np.radians(lon_lat_check[:, 1]))
with np.errstate(invalid='ignore'):
sep[sep > np.pi] -= 2. * np.pi
    mask = np.abs(sep) > ROUND_TRIP_TOL
# Mask values with invalid pixel positions
mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# We start off by pre-computing the step in pixel coordinates from one
# point to the next. The idea is to look for large jumps that might indicate
# discontinuities.
step = np.sqrt((pixel[1:, 0] - pixel[:-1, 0]) ** 2 +
(pixel[1:, 1] - pixel[:-1, 1]) ** 2)
    # We search for discontinuities by looking for places where a step is
    # larger than the previous step by more than a given factor. (An
    # alternative would be to compare each step to the median step:
    # discontinuous = step > DISCONT_FACTOR * np.median(step))
    discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]
# Skip over discontinuities
codes[2:][discontinuous] = Path.MOVETO
# The above missed the first step, so check that too
if step[0] > DISCONT_FACTOR * step[1]:
codes[1] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
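# Example of the discontinuity rule above (sketch): consecutive pixel steps
# of [1, 20, 1] give discontinuous = [True, False] (20 > 10 * 1, but
# 1 < 10 * 20), so the point reached by the long jump gets a MOVETO and the
# jump itself is never drawn.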
def get_gridline_path(world, pixel):
"""
Draw a grid line
Parameters
----------
world : `~numpy.ndarray`
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : `~numpy.ndarray`
The pixel coordinates corresponding to ``lon_lat``
"""
# Mask values with invalid pixel positions
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(world.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# Create the path
path = Path(pixel, codes=codes)
return path
|
bd1381880f3ca15a2c3d8ddf9ea39107336b2a777afb2dded68a8c42b3676c16 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
try:
import matplotlib.pyplot as plt
HAS_PLT = True
except ImportError:
HAS_PLT = False
try:
import scipy # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
import pytest
import numpy as np
from astropy.visualization import hist
from astropy.stats import histogram
@pytest.mark.skipif('not HAS_PLT')
def test_hist_basic(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
for range in [None, (-2, 2)]:
n1, bins1, patches1 = plt.hist(x, 10, range=range)
n2, bins2, patches2 = hist(x, 10, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
@pytest.mark.skipif('not HAS_PLT')
def test_hist_specify_ax(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
fig, ax = plt.subplots(2)
n1, bins1, patches1 = hist(x, 10, ax=ax[0])
assert patches1[0].axes is ax[0]
n2, bins2, patches2 = hist(x, 10, ax=ax[1])
assert patches2[0].axes is ax[1]
@pytest.mark.skipif('not HAS_PLT')
def test_hist_autobin(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
# 'knuth' bintype depends on scipy that is optional dependency
if HAS_SCIPY:
bintypes = [10, np.arange(-3, 3, 10), 'knuth', 'scott',
'freedman', 'blocks']
else:
bintypes = [10, np.arange(-3, 3, 10), 'scott',
'freedman', 'blocks']
for bintype in bintypes:
for range in [None, (-3, 3)]:
n1, bins1 = histogram(x, bintype, range=range)
n2, bins2, patches = hist(x, bintype, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
def test_histogram_pathological_input():
# Regression test for https://github.com/astropy/astropy/issues/7758
# The key feature of the data below is that one of the points is very,
# very different than the rest. That leads to a large number of bins.
data = [9.99999914e+05, -8.31312483e-03, 6.52755852e-02, 1.43104653e-03,
-2.26311017e-02, 2.82660007e-03, 1.80307521e-02, 9.26294279e-03,
5.06606026e-02, 2.05418011e-03]
with pytest.raises(ValueError):
hist(data, bins='freedman', max_bins=10000)
|
801dbaa9aeb5344ec737ac41832c0df40568790deeec814fd16df3bb2c48a6f7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils import NumpyRNGContext
from astropy.visualization.interval import (ManualInterval, MinMaxInterval, PercentileInterval,
AsymmetricPercentileInterval, ZScaleInterval)
class TestInterval:
data = np.linspace(-20., 60., 100)
def test_manual(self):
interval = ManualInterval(-10., +15.)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -10.)
np.testing.assert_allclose(vmax, +15.)
def test_manual_defaults(self):
interval = ManualInterval(vmin=-10.)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -10.)
np.testing.assert_allclose(vmax, np.max(self.data))
interval = ManualInterval(vmax=15.)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, np.min(self.data))
np.testing.assert_allclose(vmax, 15.)
def test_manual_zero_limit(self):
# Regression test for a bug that caused ManualInterval to compute the
# limit (min or max) if it was set to zero.
interval = ManualInterval(vmin=0, vmax=0)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, 0)
np.testing.assert_allclose(vmax, 0)
def test_manual_defaults_with_nan(self):
interval = ManualInterval()
data = np.copy(self.data)
        data[1] = np.nan
        vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, -20)
np.testing.assert_allclose(vmax, +60)
def test_minmax(self):
interval = MinMaxInterval()
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -20.)
np.testing.assert_allclose(vmax, +60.)
def test_percentile(self):
interval = PercentileInterval(62.2)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -4.88)
np.testing.assert_allclose(vmax, 44.88)
def test_asymmetric_percentile(self):
interval = AsymmetricPercentileInterval(10.5, 70.5)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -11.6)
np.testing.assert_allclose(vmax, 36.4)
def test_asymmetric_percentile_nsamples(self):
with NumpyRNGContext(12345):
interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -14.367676767676768)
np.testing.assert_allclose(vmax, 40.266666666666666)
class TestIntervalList(TestInterval):
# Make sure intervals work with lists
data = np.linspace(-20., 60., 100).tolist()
class TestInterval2D(TestInterval):
# Make sure intervals work with 2d arrays
data = np.linspace(-20., 60., 100).reshape(100, 1)
def test_zscale():
np.random.seed(42)
data = np.random.randn(100, 100) * 5 + 10
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, -9.6, atol=0.1)
np.testing.assert_allclose(vmax, 25.4, atol=0.1)
data = list(range(1000)) + [np.nan]
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, 0, atol=0.1)
np.testing.assert_allclose(vmax, 999, atol=0.1)
data = list(range(100))
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, 0, atol=0.1)
np.testing.assert_allclose(vmax, 99, atol=0.1)
def test_integers():
# Need to make sure integers get cast to float
interval = MinMaxInterval()
values = interval([1, 3, 4, 5, 6])
np.testing.assert_allclose(values, [0., 0.4, 0.6, 0.8, 1.0])
# Don't accept integer array in output
out = np.zeros(5, dtype=int)
with pytest.raises(TypeError) as exc:
values = interval([1, 3, 4, 5, 6], out=out)
assert exc.value.args[0] == ("Can only do in-place scaling for "
"floating-point arrays")
# But integer input and floating point output is fine
out = np.zeros(5, dtype=float)
interval([1, 3, 4, 5, 6], out=out)
np.testing.assert_allclose(out, [0., 0.4, 0.6, 0.8, 1.0])
def test_constant_data():
"""Test intervals with constant data (avoiding divide-by-zero)."""
shape = (10, 10)
data = np.ones(shape)
interval = MinMaxInterval()
limits = interval.get_limits(data)
values = interval(data)
np.testing.assert_allclose(limits, (1., 1.))
np.testing.assert_allclose(values, np.zeros(shape))
|
b24c99796fc0f74175a5980e273dba8f07587d032352ebc3f9dfa10cd2b63b6d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.visualization.stretch import (LinearStretch, SqrtStretch, PowerStretch,
PowerDistStretch, SquaredStretch, LogStretch,
AsinhStretch, SinhStretch, HistEqStretch,
ContrastBiasStretch)
DATA = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
RESULTS = {}
RESULTS[LinearStretch()] = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
RESULTS[LinearStretch(intercept=0.5) + LinearStretch(slope=0.5)] = \
np.array([0.5, 0.625, 0.75, 0.875, 1.])
RESULTS[SqrtStretch()] = np.array([0., 0.5, 0.70710678, 0.8660254, 1.])
RESULTS[SquaredStretch()] = np.array([0., 0.0625, 0.25, 0.5625, 1.])
RESULTS[PowerStretch(0.5)] = np.array([0., 0.5, 0.70710678, 0.8660254, 1.])
RESULTS[PowerDistStretch()] = np.array([0., 0.004628, 0.030653, 0.177005, 1.])
RESULTS[LogStretch()] = np.array([0., 0.799776, 0.899816, 0.958408, 1.])
RESULTS[AsinhStretch()] = np.array([0., 0.549402, 0.77127, 0.904691, 1.])
RESULTS[SinhStretch()] = np.array([0., 0.082085, 0.212548, 0.46828, 1.])
RESULTS[ContrastBiasStretch(contrast=2., bias=0.4)] = np.array([-0.3, 0.2,
0.7, 1.2,
1.7])
RESULTS[HistEqStretch(DATA)] = DATA
RESULTS[HistEqStretch(DATA[::-1])] = DATA
RESULTS[HistEqStretch(DATA ** 0.5)] = np.array([0., 0.125, 0.25, 0.5674767,
1.])
class TestStretch:
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_no_clip(self, stretch):
np.testing.assert_allclose(stretch(DATA, clip=False),
RESULTS[stretch], atol=1.e-6)
@pytest.mark.parametrize('ndim', [2, 3])
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_clip_ndimensional(self, stretch, ndim):
new_shape = DATA.shape + (1,) * ndim
np.testing.assert_allclose(stretch(DATA.reshape(new_shape),
clip=True).ravel(),
np.clip(RESULTS[stretch], 0., 1),
atol=1.e-6)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_clip(self, stretch):
np.testing.assert_allclose(stretch(DATA, clip=True),
np.clip(RESULTS[stretch], 0., 1),
atol=1.e-6)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_inplace(self, stretch):
data_in = DATA.copy()
result = np.zeros(DATA.shape)
stretch(data_in, out=result, clip=False)
np.testing.assert_allclose(result, RESULTS[stretch], atol=1.e-6)
np.testing.assert_allclose(data_in, DATA)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_round_trip(self, stretch):
np.testing.assert_allclose(stretch.inverse(stretch(DATA, clip=False),
clip=False), DATA)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_inplace_roundtrip(self, stretch):
result = np.zeros(DATA.shape)
stretch(DATA, out=result, clip=False)
stretch.inverse(result, out=result, clip=False)
np.testing.assert_allclose(result, DATA)
@pytest.mark.parametrize('stretch', RESULTS.keys())
def test_double_inverse(self, stretch):
np.testing.assert_allclose(stretch.inverse.inverse(DATA),
stretch(DATA), atol=1.e-6)
def test_inverted(self):
stretch_1 = SqrtStretch().inverse
stretch_2 = PowerStretch(2)
np.testing.assert_allclose(stretch_1(DATA),
stretch_2(DATA))
def test_chaining(self):
stretch_1 = SqrtStretch() + SqrtStretch()
stretch_2 = PowerStretch(0.25)
stretch_3 = PowerStretch(4.)
np.testing.assert_allclose(stretch_1(DATA),
stretch_2(DATA))
np.testing.assert_allclose(stretch_1.inverse(DATA),
stretch_3(DATA))
def test_clip_invalid():
stretch = SqrtStretch()
values = stretch([-1., 0., 0.5, 1., 1.5])
np.testing.assert_allclose(values, [0., 0., 0.70710678, 1., 1.])
values = stretch([-1., 0., 0.5, 1., 1.5], clip=False)
np.testing.assert_allclose(values, [np.nan, 0., 0.70710678, 1., 1.2247448])
|
db6ed4d309d92f0074831faa142e6032da258501d841a6ad1b3f21d00ab70158 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
try:
import matplotlib.pyplot as plt
except ImportError:
HAS_PLT = False
else:
HAS_PLT = True
from astropy import units as u
from astropy.visualization.units import quantity_support
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_units_errorbar():
pytest.importorskip("matplotlib", minversion="2.2")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
plt.clf()
|
c30eeed1ed53e0e4d9f4c28924f0fe441a63fd0d3553cf0f7e5cc17b507031c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for RGB Images
"""
import sys
import os
import tempfile
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.visualization import lupton_rgb
try:
import matplotlib # noqa
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
# Set display=True to get matplotlib imshow windows to help with debugging.
display = False
def display_rgb(rgb, title=None):
"""Display an rgb image using matplotlib (useful for debugging)"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin='lower')
if title:
plt.title(title)
plt.show()
return plt
def saturate(image, satValue):
"""
Return image with all points above satValue set to NaN.
Simulates saturation on an image, so we can test 'replace_saturated_pixels'
"""
result = image.copy()
saturated = image > satValue
result[saturated] = np.nan
return result
def random_array(dtype, N=100):
    return np.array(np.random.random(N)*100, dtype=dtype)
def test_compute_intensity_1_float():
image_r = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_1_uint():
image_r = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_3_float():
image_r = random_array(np.float64)
image_g = random_array(np.float64)
image_b = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)/3.0)
def test_compute_intensity_3_uint():
image_r = random_array(np.uint8)
image_g = random_array(np.uint8)
image_b = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)//3)
class TestLuptonRgb:
"""A test case for Rgb"""
def setup_method(self, method):
np.random.seed(1000) # so we always get the same images.
self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh
width, height = 85, 75
self.width = width
self.height = height
shape = (width, height)
image_r = np.zeros(shape)
image_g = np.zeros(shape)
image_b = np.zeros(shape)
# pixel locations, values and colors
points = [[15, 15], [50, 45], [30, 30], [45, 15]]
values = [1000, 5500, 600, 20000]
g_r = [1.0, -1.0, 1.0, 1.0]
r_i = [2.0, -0.5, 2.5, 1.0]
# Put pixels in the images.
for p, v, gr, ri in zip(points, values, g_r, r_i):
image_r[p[0], p[1]] = v*pow(10, 0.4*ri)
image_g[p[0], p[1]] = v*pow(10, 0.4*gr)
image_b[p[0], p[1]] = v
# convolve the image with a reasonable PSF, and add Gaussian background noise
def convolve_with_noise(image, psf):
convolvedImage = convolve(image, psf, boundary='extend', normalize_kernel=True)
randomImage = np.random.normal(0, 2, image.shape)
return randomImage + convolvedImage
psf = Gaussian2DKernel(2.5)
self.image_r = convolve_with_noise(image_r, psf)
self.image_g = convolve_with_noise(image_g, psf)
self.image_b = convolve_with_noise(image_b, psf)
def test_Asinh(self):
"""Test creating an RGB image using an asinh stretch"""
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscale(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensity(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityPedestal(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
where the images each have a pedestal added"""
pedestal = [100, 400, -400]
self.image_r += pedestal[0]
self.image_g += pedestal[1]
self.image_b += pedestal[2]
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b, pedestal=pedestal)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityBW(self):
"""Test creating a black-and-white image using an asinh stretch estimated
using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r)
rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_make_rgb(self):
"""Test the function that does it all"""
satValue = 1000.0
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q, filename=temp)
assert os.path.exists(temp.name)
def test_make_rgb_saturated_fix(self):
pytest.skip('saturation correction is not implemented')
satValue = 1000.0
# TODO: Cannot test with these options yet, as that part of the code is not implemented.
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q,
saturated_border_width=1, saturated_pixel_value=2000,
filename=temp)
def test_linear(self):
"""Test using a specified linear stretch"""
map = lupton_rgb.LinearMapping(-8.45, 13.44)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_linear_min_max(self):
"""Test using a min/max linear stretch determined from one image"""
map = lupton_rgb.LinearMapping(image=self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_saturated(self):
"""Test interpolationolating saturated pixels"""
pytest.skip('replaceSaturatedPixels is not implemented in astropy yet')
satValue = 1000.0
self.image_r = saturate(self.image_r, satValue)
self.image_g = saturate(self.image_g, satValue)
self.image_b = saturate(self.image_b, satValue)
lupton_rgb.replaceSaturatedPixels(self.image_r, self.image_g, self.image_b, 1, 2000)
# Check that we replaced those NaNs with some reasonable value
assert np.isfinite(self.image_r.getImage().getArray()).all()
assert np.isfinite(self.image_g.getImage().getArray()).all()
assert np.isfinite(self.image_b.getImage().getArray()).all()
# Prepare for generating an output file
        self.imagesR = self.imagesR.getImage()
        self.imagesG = self.imagesG.getImage()
        self.imagesB = self.imagesB.getImage()
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_different_shapes_asserts(self):
with pytest.raises(ValueError) as excinfo:
# just swap the dimensions to get a differently-shaped 'r'
image_r = self.image_r.reshape(self.height, self.width)
lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
assert "shapes must match" in str(excinfo.value)
|
ad2e3eabaf7299ce69acac9aa19576c549ea61263f100332a064ad1795299f17 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy import ma
from numpy.testing import assert_allclose
from astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm
from astropy.visualization.interval import ManualInterval
from astropy.visualization.stretch import SqrtStretch
try:
import matplotlib # pylint: disable=W0611
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
DATA = np.linspace(0., 15., 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
@pytest.mark.skipif('HAS_MATPLOTLIB')
def test_normalize_error_message():
with pytest.raises(ImportError) as exc:
ImageNormalize()
assert (exc.value.args[0] == "matplotlib is required in order to use "
"this class.")
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,
clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,
clip=True)
def test_scalar(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(DATA)
expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == 2.
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(mdata)
expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear')
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm = simple_norm(DATA2, stretch='sqrt')
assert_allclose(norm(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch='power', power=power)
assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch='log')
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch='log', log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch='asinh')
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch='asinh', asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1. / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear', min_cut=1.)
assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch='linear', percent=99.)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,
max_percent=99.5)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch='invalid')
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_imshow_norm():
image = np.random.randn(10, 10)
ax = plt.subplot()
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# vmin/vmax "shadow" the MPL versions, so imshow_only_kwargs allows direct-setting
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(vmin=0, vmax=1))
# but it should fail for an argument that is not in ImageNormalize
with pytest.raises(ValueError):
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(cmap='jet'))
# make sure the pyplot version works
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
|
538dbdb01be823e895197843fe65cb6073bb24fb22f6441fd5d3deb6a1a1cab2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io import fits
try:
import matplotlib # pylint: disable=W0611
import matplotlib.image as mpimg
HAS_MATPLOTLIB = True
from astropy.visualization.scripts.fits2bitmap import fits2bitmap, main
except ImportError:
HAS_MATPLOTLIB = False
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestFits2Bitmap:
def setup_class(self):
self.filename = 'test.fits'
self.array = np.arange(16384).reshape((128, 128))
def test_function(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, self.array)
fits2bitmap(filename)
def test_script(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, self.array)
main([filename, '-e', '0'])
def test_exten_num(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(self.array)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', '1'])
def test_exten_name(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
hdu1 = fits.PrimaryHDU()
extname = 'SCI'
hdu2 = fits.ImageHDU(self.array)
hdu2.header['EXTNAME'] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', extname])
@pytest.mark.parametrize('file_exten', ['.gz', '.bz2'])
def test_compressed_fits(self, tmpdir, file_exten):
filename = tmpdir.join('test.fits' + file_exten).strpath
fits.writeto(filename, self.array)
main([filename, '-e', '0'])
def test_orientation(self, tmpdir):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = tmpdir.join(self.filename).strpath
out_filename = 'fits2bitmap_test.png'
out_filename = tmpdir.join(out_filename).strpath
data = np.zeros((32, 32))
data[0:16, :] = 1.
fits.writeto(filename, data)
main([filename, '-e', '0', '-o', out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
|
8a798f4debaa1516c2f2829b06af101b8ba127d9899bfd4727053efdebae0762 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from distutils.version import LooseVersion
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.contour import QuadContourSet
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.tests.helper import catch_warnings
from astropy.tests.image_tests import ignore_matplotlibrc
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.visualization.wcsaxes.transforms import CurvedTransform
MATPLOTLIB_LT_21 = LooseVersion(matplotlib.__version__) < LooseVersion("2.1")
DATA = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
@ignore_matplotlibrc
def test_grid_regression():
# Regression test for a bug that meant that if the rc parameter
    # axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc('axes', grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
@ignore_matplotlibrc
def test_format_coord_regression(tmpdir):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring("""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""", sep='\n')
@ignore_matplotlibrc
def test_no_numpy_warnings(tmpdir):
# Make sure that no warnings are raised if some pixels are outside WCS
# (since this is normal)
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color='white')
with catch_warnings(RuntimeWarning) as ws:
plt.savefig(tmpdir.join('test.png').strpath)
# For debugging
for w in ws:
print(w)
assert len(ws) == 0
@ignore_matplotlibrc
def test_invalid_frame_overlay():
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError) as exc:
ax.get_coords_overlay('banana')
assert exc.value.args[0] == 'Unknown frame: banana'
with pytest.raises(ValueError) as exc:
get_coord_meta('banana')
assert exc.value.args[0] == 'Unknown frame: banana'
@ignore_matplotlibrc
def test_plot_coord_transform():
twoMASS_k_header = os.path.join(DATA, '2MASS_k_header')
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, 'o', transform=ax.get_transform('galactic'))
@ignore_matplotlibrc
def test_set_label_properties():
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel('Test x label', labelpad=2, color='red')
ax.set_ylabel('Test y label', labelpad=3, color='green')
assert ax.coords[0].axislabels.get_text() == 'Test x label'
assert ax.coords[0].axislabels.get_minpad('b') == 2
assert ax.coords[0].axislabels.get_color() == 'red'
assert ax.coords[1].axislabels.get_text() == 'Test y label'
assert ax.coords[1].axislabels.get_minpad('l') == 3
assert ax.coords[1].axislabels.get_color() == 'green'
GAL_HEADER = fits.Header.fromstring("""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""", sep='\n')
@ignore_matplotlibrc
def test_slicing_warnings(tmpdir):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
plt.savefig(tmpdir.join('test.png').strpath)
# For easy debugging if there are indeed warnings
for warning in warning_lines:
print(warning)
assert len(warning_lines) == 0
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2))
plt.savefig(tmpdir.join('test.png').strpath)
# For easy debugging if there are indeed warnings
for warning in warning_lines:
print(warning)
assert len(warning_lines) == 0
def test_plt_xlabel_ylabel(tmpdir):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel('Galactic Longitude')
plt.ylabel('Galactic Latitude')
plt.savefig(tmpdir.join('test.png').strpath)
def test_grid_type_contours_transform(tmpdir):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {'type': ('scalar', 'scalar'),
'unit': (u.m, u.s),
'wrap': (None, None),
'name': ('x', 'y')}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],
transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type='contours')
fig.savefig(tmpdir.join('test.png').strpath)
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmpdir):
    # Regression test for a bug that caused a crash when grid was called and
    # didn't produce any grid lines (e.g. because the spacing was too large)
    # and was then called again.
filename = tmpdir.join('test.png').strpath
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
@pytest.mark.skipif('MATPLOTLIB_LT_21')
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world'))
|
dfbc55ce2cb616f833ec095397dbcd2d6287f6842e37e9f9f9b749cc4aa00b16 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_almost_equal
from astropy import units as u
from astropy.visualization.wcsaxes.utils import (select_step_degree, select_step_hour, select_step_scalar,
coord_type_from_ctype)
from astropy.tests.helper import (assert_quantity_allclose as
assert_almost_equal_quantity)
def test_select_step_degree():
assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180. * u.deg)
assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45. * u.deg)
assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec)
def test_select_step_hour():
assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3. * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec)
def test_select_step_scalar():
assert_almost_equal(select_step_scalar(33122.), 50000.)
assert_almost_equal(select_step_scalar(433.), 500.)
assert_almost_equal(select_step_scalar(12.3), 10)
assert_almost_equal(select_step_scalar(3.3), 5.)
assert_almost_equal(select_step_scalar(0.66), 0.5)
assert_almost_equal(select_step_scalar(0.0877), 0.1)
assert_almost_equal(select_step_scalar(0.00577), 0.005)
assert_almost_equal(select_step_scalar(0.00022), 0.0002)
assert_almost_equal(select_step_scalar(0.000012), 0.00001)
assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
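# Illustrative sketch (an assumption about the behavior, not the library's
# actual implementation): the expectations above are consistent with choosing
# the "nice" step of the form {1, 2, 5} * 10**n that is closest to the input
# in log space. A hypothetical reference:
def _nearest_nice_step(value):
    import numpy as np
    # Candidate nice steps within the input's decade (10**n also covers the
    # next decade's 1 * 10**(n+1)).
    candidates = np.array([1., 2., 5., 10.]) * 10 ** np.floor(np.log10(value))
    # Pick the candidate with the smallest logarithmic distance to the input.
    return candidates[np.argmin(np.abs(np.log10(candidates / value)))]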
def test_coord_type_from_ctype():
assert coord_type_from_ctype(' LON') == ('longitude', None, None)
assert coord_type_from_ctype(' LAT') == ('latitude', None, None)
assert coord_type_from_ctype('HPLN') == ('longitude', u.arcsec, 180.)
assert coord_type_from_ctype('HPLT') == ('latitude', u.arcsec, None)
assert coord_type_from_ctype('RA--') == ('longitude', u.hourangle, None)
assert coord_type_from_ctype('DEC-') == ('latitude', None, None)
assert coord_type_from_ctype('spam') == ('scalar', None, None)
|
f1661782525f11cdbbf234f2b5e296665ee89eb37b234c60cc844337b3728291 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import WCSAxes
from astropy.visualization.wcsaxes.frame import BaseFrame
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from .test_images import BaseImageTests
class HexagonalFrame(BaseFrame):
spine_names = 'abcdef'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
ymid = 0.5 * (ymin + ymax)
xmid1 = (xmin + xmax) / 4.
xmid2 = (xmin + xmax) * 3. / 4.
self['a'].data = np.array(([xmid1, ymin], [xmid2, ymin]))
self['b'].data = np.array(([xmid2, ymin], [xmax, ymid]))
self['c'].data = np.array(([xmax, ymid], [xmid2, ymax]))
self['d'].data = np.array(([xmid2, ymax], [xmid1, ymax]))
self['e'].data = np.array(([xmid1, ymax], [xmin, ymid]))
self['f'].data = np.array(([xmin, ymid], [xmid1, ymin]))
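# The six spines a-f trace a hexagonal frame: the flat bottom and top edges run
# between x = (xmin + xmax) / 4 and x = 3 * (xmin + xmax) / 4, and the side
# vertices sit at the vertical midpoint of the left and right edges.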
class TestFrame(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_custom_frame(self):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7],
wcs=wcs,
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.coords.grid(color='white')
im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2.,
origin='lower', cmap=plt.cm.gist_heat)
minpad = {}
minpad['a'] = minpad['d'] = 1
minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75
ax.coords['glon'].set_axislabel("Longitude", minpad=minpad)
ax.coords['glon'].set_axislabel_position('ad')
ax.coords['glat'].set_axislabel("Latitude", minpad=minpad)
ax.coords['glat'].set_axislabel_position('bcef')
ax.coords['glon'].set_ticklabel_position('ad')
ax.coords['glat'].set_ticklabel_position('bcef')
# Set limits so that no labels overlap
ax.set_xlim(5.5, 100.5)
ax.set_ylim(5.5, 110.5)
# Clip the image to the frame
im.set_clip_path(ax.coords.frame.patch)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_update_clip_path_rectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_update_clip_path_nonrectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal',
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_update_clip_path_change_wcs(self, tmpdir):
# When WCS is changed, a new frame is created, so we need to make sure
# that the path is carried over to the new frame.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.reset_wcs()
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
def test_copy_frame_properties_change_wcs(self):
# When WCS is changed, a new frame is created, so we need to make sure
# that the color and linewidth are transferred over
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.coords.frame.set_linewidth(5)
ax.coords.frame.set_color('purple')
ax.reset_wcs()
assert ax.coords.frame.get_linewidth() == 5
assert ax.coords.frame.get_color() == 'purple'
|
d949f1fa97faa1524a4408c8a5bd61bf3bf55723f7d358872245d0c6750d52e3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Downloads the FITS files that are used in image testing and for building documentation.
"""
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
__all__ = ['fetch_msx_hdu',
'fetch_rosat_hdu',
'fetch_twoMASS_k_hdu',
'fetch_l1448_co_hdu',
'fetch_bolocam_hdu',
]
def fetch_hdu(filename):
"""
Download a FITS file to the cache and open HDU 0.
"""
path = get_pkg_data_filename(filename)
return fits.open(path)[0]
def fetch_msx_hdu():
"""Fetch the MSX example dataset HDU.
Returns
-------
hdu : `~astropy.io.fits.ImageHDU`
Image HDU
"""
return fetch_hdu('galactic_center/gc_msx_e.fits')
def fetch_rosat_hdu():
return fetch_hdu('allsky/allsky_rosat.fits')
def fetch_twoMASS_k_hdu():
return fetch_hdu('galactic_center/gc_2mass_k.fits')
def fetch_l1448_co_hdu():
return fetch_hdu('l1448/l1448_13co.fits')
def fetch_bolocam_hdu():
return fetch_hdu('galactic_center/gc_bolocam_gps.fits')
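# Example usage (an illustrative sketch; the first call needs network access
# to the astropy data server, after which the file is cached):
#
#     from . import datasets
#     hdu = datasets.fetch_msx_hdu()
#     print(hdu.data.shape)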
|
3d73cebd4c71f7933af150f438a8548215eb02d079c1abb69b2b4e900246e93b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import WCSAxes
from .test_images import BaseImageTests
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
# Create fake transforms that roughly mimic a polar projection
class DistanceToLonLat(CurvedTransform):
has_inverse = True
def __init__(self, R=6e3):
super().__init__()
self.R = R
def transform(self, xy):
x, y = xy[:, 0], xy[:, 1]
lam = np.degrees(np.arctan2(y, x))
phi = 90. - np.degrees(np.hypot(x, y) / self.R)
return np.array((lam, phi)).transpose()
transform_non_affine = transform
def inverted(self):
return LonLatToDistance(R=self.R)
class LonLatToDistance(CurvedTransform):
def __init__(self, R=6e3):
super().__init__()
self.R = R
def transform(self, lamphi):
lam, phi = lamphi[:, 0], lamphi[:, 1]
r = np.radians(90 - phi) * self.R
x = r * np.cos(np.radians(lam))
y = r * np.sin(np.radians(lam))
return np.array((x, y)).transpose()
transform_non_affine = transform
def inverted(self):
return DistanceToLonLat(R=self.R)
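# Minimal sanity check for the fake transforms above (an illustrative sketch,
# not part of the original suite): mapping points to (lon, lat) and back
# should recover the inputs to within floating-point error.
def _check_fake_transform_roundtrip():
    xy = np.array([[100., 200.], [-50., 75.]])
    forward = DistanceToLonLat(R=6e3)
    roundtrip = forward.inverted().transform(forward.transform(xy))
    assert np.allclose(roundtrip, xy)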
class TestTransformCoordMeta(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_coords_overlay(self):
# Set up a simple WCS that maps pixels to non-projected distances
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['x', 'y']
wcs.wcs.cunit = ['km', 'km']
wcs.wcs.crpix = [614.5, 856.5]
wcs.wcs.cdelt = [6.25, 6.25]
wcs.wcs.crval = [0., 0.]
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs)
fig.add_axes(ax)
s = DistanceToLonLat(R=6378.273)
ax.coords['x'].set_ticklabel_position('')
ax.coords['y'].set_ticklabel_position('')
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (360., None)
coord_meta['unit'] = (u.deg, u.deg)
coord_meta['name'] = 'lon', 'lat'
overlay = ax.get_coords_overlay(s, coord_meta=coord_meta)
overlay.grid(color='red')
overlay['lon'].grid(color='red', linestyle='solid', alpha=0.3)
overlay['lat'].grid(color='blue', linestyle='solid', alpha=0.3)
overlay['lon'].set_ticklabel(size=7, exclude_overlapping=True)
overlay['lat'].set_ticklabel(size=7, exclude_overlapping=True)
overlay['lon'].set_ticklabel_position('brtl')
overlay['lat'].set_ticklabel_position('brtl')
overlay['lon'].set_ticks(spacing=10. * u.deg)
overlay['lat'].set_ticks(spacing=10. * u.deg)
ax.set_xlim(-0.5, 1215.5)
ax.set_ylim(-0.5, 1791.5)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_coords_overlay_auto_coord_meta(self):
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=WCS(self.msx_header))
fig.add_axes(ax)
ax.grid(color='red', alpha=0.5, linestyle='solid')
overlay = ax.get_coords_overlay('fk5') # automatically sets coord_meta
overlay.grid(color='black', alpha=0.5, linestyle='solid')
overlay['ra'].set_ticks(color='black')
overlay['dec'].set_ticks(color='black')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_direct_init(self):
s = DistanceToLonLat(R=6378.273)
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (360., None)
coord_meta['unit'] = (u.deg, u.deg)
coord_meta['name'] = 'lon', 'lat'
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], transform=s, coord_meta=coord_meta)
fig.add_axes(ax)
ax.coords['lon'].grid(color='red', linestyle='solid', alpha=0.3)
ax.coords['lat'].grid(color='blue', linestyle='solid', alpha=0.3)
ax.coords['lon'].set_ticklabel(size=7, exclude_overlapping=True)
ax.coords['lat'].set_ticklabel(size=7, exclude_overlapping=True)
ax.coords['lon'].set_ticklabel_position('brtl')
ax.coords['lat'].set_ticklabel_position('brtl')
ax.coords['lon'].set_ticks(spacing=10. * u.deg)
ax.coords['lat'].set_ticks(spacing=10. * u.deg)
ax.set_xlim(-400., 500.)
ax.set_ylim(-300., 400.)
return fig
|
bd5eaf702248b8848a463425cca27e47eec1af8d97f40190f83114e8e585be49 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from matplotlib import rc_context
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.visualization.wcsaxes.formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
class TestAngleFormatterLocator:
def test_no_options(self):
fl = AngleFormatterLocator()
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], number=5)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(number=5, spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], number=5, spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
def test_values(self):
fl = AngleFormatterLocator(values=[0.1, 1., 14.] * u.degree)
assert fl.values.to_value(u.degree).tolist() == [0.1, 1., 14.]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [0.1, 1., 14.])
def test_number(self):
fl = AngleFormatterLocator(number=7)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [35., 40., 45., 50., 55.])
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 34.75, 35., 35.25, 35.5, 35.75, 36.])
fl.format = 'dd'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35., 36.])
def test_spacing(self):
with pytest.raises(TypeError) as exc:
AngleFormatterLocator(spacing=3.)
assert exc.value.args[0] == "spacing should be an astropy.units.Quantity instance with units of angle"
fl = AngleFormatterLocator(spacing=3. * u.degree)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3. * u.degree
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [36., 39., 42., 45., 48., 51., 54.])
fl.spacing = 30. * u.arcmin
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 35., 35.5, 36.])
with pytest.warns(UserWarning, match='Spacing is too small'):
fl.format = 'dd'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35., 36.])
def test_minor_locator(self):
fl = AngleFormatterLocator()
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [36., 37., 38.,
39., 41., 42., 43., 44., 46., 47., 48., 49., 51.,
52., 53., 54.])
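        # Values coinciding with the major ticks (35, 40, 45, 50, 55) are
        # excluded from the minor tick values.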
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1., 14.] * u.degree
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [])
@pytest.mark.parametrize(('format', 'string'), [('dd', '15\xb0'),
('dd:mm', '15\xb024\''),
('dd:mm:ss', '15\xb023\'32"'),
('dd:mm:ss.s', '15\xb023\'32.0"'),
('dd:mm:ss.ssss', '15\xb023\'32.0316"'),
('hh', '1h'),
('hh:mm', '1h02m'),
('hh:mm:ss', '1h01m34s'),
('hh:mm:ss.s', '1h01m34.1s'),
('hh:mm:ss.ssss', '1h01m34.1354s'),
('d', '15\xb0'),
('d.d', '15.4\xb0'),
('d.dd', '15.39\xb0'),
('d.ddd', '15.392\xb0'),
('m', '924\''),
('m.m', '923.5\''),
('m.mm', '923.53\''),
('s', '55412"'),
('s.s', '55412.0"'),
('s.ss', '55412.03"'),
])
def test_format(self, format, string):
fl = AngleFormatterLocator(number=5, format=format)
print(fl.formatter([15.392231] * u.degree, None, format='ascii')[0], string)
assert fl.formatter([15.392231] * u.degree, None, format='ascii')[0] == string
@pytest.mark.parametrize(('separator', 'format', 'string'), [(('deg', "'", '"'), 'dd', '15deg'),
(('deg', "'", '"'), 'dd:mm', '15deg24\''),
(('deg', "'", '"'), 'dd:mm:ss', '15deg23\'32"'),
((':', "-", 's'), 'dd:mm:ss.s', '15:23-32.0s'),
(':', 'dd:mm:ss.s', '15:23:32.0'),
((':', ":", 's'), 'hh', '1:'),
(('-', "-", 's'), 'hh:mm:ss.ssss', '1-01-34.1354s'),
(('d', ":", '"'), 'd', '15\xb0'),
(('d', ":", '"'), 'd.d', '15.4\xb0'),
])
def test_separator(self, separator, format, string):
fl = AngleFormatterLocator(number=5, format=format)
fl.sep = separator
assert fl.formatter([15.392231] * u.degree, None)[0] == string
def test_latex_format(self):
fl = AngleFormatterLocator(number=5, format="dd:mm:ss")
assert fl.formatter([15.392231] * u.degree, None)[0] == '15\xb023\'32"'
with rc_context(rc={'text.usetex': True}):
assert fl.formatter([15.392231] * u.degree, None)[0] == "$15^\\circ23{}^\\prime32{}^{\\prime\\prime}$"
@pytest.mark.parametrize(('format'), ['x.xxx', 'dd.ss', 'dd:ss', 'mdd:mm:ss'])
def test_invalid_formats(self, format):
fl = AngleFormatterLocator(number=5)
with pytest.raises(ValueError) as exc:
fl.format = format
assert exc.value.args[0] == "Invalid format: " + format
@pytest.mark.parametrize(('format', 'base_spacing'), [('dd', 1. * u.deg),
('dd:mm', 1. * u.arcmin),
('dd:mm:ss', 1. * u.arcsec),
('dd:mm:ss.ss', 0.01 * u.arcsec),
('hh', 15. * u.deg),
('hh:mm', 15. * u.arcmin),
('hh:mm:ss', 15. * u.arcsec),
('hh:mm:ss.ss', 0.15 * u.arcsec),
('d', 1. * u.deg),
('d.d', 0.1 * u.deg),
('d.dd', 0.01 * u.deg),
('d.ddd', 0.001 * u.deg),
('m', 1. * u.arcmin),
('m.m', 0.1 * u.arcmin),
('m.mm', 0.01 * u.arcmin),
('s', 1. * u.arcsec),
('s.s', 0.1 * u.arcsec),
('s.ss', 0.01 * u.arcsec),
])
def test_base_spacing(self, format, base_spacing):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = AngleFormatterLocator()
fl.spacing = 0.032 * u.deg
with pytest.warns(UserWarning, match='Spacing is not a multiple of base spacing'):
fl.format = 'dd:mm:ss'
assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.)
def test_decimal_values(self):
# Regression test for a bug that meant that the spacing was not
# determined correctly for decimal coordinates
fl = AngleFormatterLocator()
fl.format = 'd.dddd'
assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
[266.9735, 266.9740, 266.9745, 266.9750] * u.deg)
fl = AngleFormatterLocator(decimal=True, format_unit=u.hourangle, number=4)
assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
[17.79825, 17.79830] * u.hourangle)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.arcsec, decimal=True)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[1000., 1200., 1400., 1600., 1800., 2000.] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.degree, decimal=False)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[15., 20., 25., 30., 35.] * u.arcmin)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.hourangle, decimal=False)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[60., 75., 90., 105., 120., 135.] * (15 * u.arcsec))
fl = AngleFormatterLocator(unit=u.arcsec)
fl.format = 'dd:mm:ss'
assert_quantity_allclose(fl.locator(0.9, 1.1)[0], [1] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, spacing=0.2 * u.arcsec)
assert_quantity_allclose(fl.locator(0.3, 0.9)[0], [0.4, 0.6, 0.8] * u.arcsec)
@pytest.mark.parametrize(('spacing', 'string'), [(2 * u.deg, '15\xb0'),
(2 * u.arcmin, '15\xb024\''),
(2 * u.arcsec, '15\xb023\'32"'),
(0.1 * u.arcsec, '15\xb023\'32.0"')])
def test_formatter_no_format(self, spacing, string):
fl = AngleFormatterLocator()
assert fl.formatter([15.392231] * u.degree, spacing)[0] == string
@pytest.mark.parametrize(('format_unit', 'decimal', 'show_decimal_unit', 'spacing', 'ascii', 'latex'),
[(u.degree, False, True, 2 * u.degree, '15\xb0', r'$15^\circ$'),
(u.degree, False, True, 2 * u.arcmin, '15\xb024\'', r'$15^\circ24{}^\prime$'),
(u.degree, False, True, 2 * u.arcsec, '15\xb023\'32"', r'$15^\circ23{}^\prime32{}^{\prime\prime}$'),
(u.degree, False, True, 0.1 * u.arcsec, '15\xb023\'32.0"', r'$15^\circ23{}^\prime32.0{}^{\prime\prime}$'),
(u.hourangle, False, True, 15 * u.degree, '1h', r'$1^\mathrm{h}$'),
(u.hourangle, False, True, 15 * u.arcmin, '1h02m', r'$1^\mathrm{h}02^\mathrm{m}$'),
(u.hourangle, False, True, 15 * u.arcsec, '1h01m34s', r'$1^\mathrm{h}01^\mathrm{m}34^\mathrm{s}$'),
(u.hourangle, False, True, 1.5 * u.arcsec, '1h01m34.1s', r'$1^\mathrm{h}01^\mathrm{m}34.1^\mathrm{s}$'),
(u.degree, True, True, 15 * u.degree, '15\xb0', r'$15\mathrm{^\circ}$'),
(u.degree, True, True, 0.12 * u.degree, '15.39\xb0', r'$15.39\mathrm{^\circ}$'),
(u.degree, True, True, 0.0036 * u.arcsec, '15.392231\xb0', r'$15.392231\mathrm{^\circ}$'),
(u.arcmin, True, True, 15 * u.degree, '924\'', r'$924\mathrm{^\prime}$'),
(u.arcmin, True, True, 0.12 * u.degree, '923.5\'', r'$923.5\mathrm{^\prime}$'),
(u.arcmin, True, True, 0.1 * u.arcmin, '923.5\'', r'$923.5\mathrm{^\prime}$'),
(u.arcmin, True, True, 0.0002 * u.arcmin, '923.5339\'', r'$923.5339\mathrm{^\prime}$'),
(u.arcsec, True, True, 0.01 * u.arcsec, '55412.03"', r'$55412.03\mathrm{^{\prime\prime}}$'),
(u.arcsec, True, True, 0.001 * u.arcsec, '55412.032"', r'$55412.032\mathrm{^{\prime\prime}}$'),
(u.mas, True, True, 0.001 * u.arcsec, '55412032mas', r'$55412032\mathrm{mas}$'),
(u.degree, True, False, 15 * u.degree, '15', '15'),
(u.degree, True, False, 0.12 * u.degree, '15.39', '15.39'),
(u.degree, True, False, 0.0036 * u.arcsec, '15.392231', '15.392231'),
(u.arcmin, True, False, 15 * u.degree, '924', '924'),
(u.arcmin, True, False, 0.12 * u.degree, '923.5', '923.5'),
(u.arcmin, True, False, 0.1 * u.arcmin, '923.5', '923.5'),
(u.arcmin, True, False, 0.0002 * u.arcmin, '923.5339', '923.5339'),
(u.arcsec, True, False, 0.01 * u.arcsec, '55412.03', '55412.03'),
(u.arcsec, True, False, 0.001 * u.arcsec, '55412.032', '55412.032'),
(u.mas, True, False, 0.001 * u.arcsec, '55412032', '55412032'),
# Make sure that specifying None defaults to
# decimal for non-degree or non-hour angles
(u.arcsec, None, True, 0.01 * u.arcsec, '55412.03"', r'$55412.03\mathrm{^{\prime\prime}}$')])
def test_formatter_no_format_with_units(self, format_unit, decimal, show_decimal_unit, spacing, ascii, latex):
# Check the formatter works when specifying the default units and
# decimal behavior to use.
fl = AngleFormatterLocator(unit=u.degree, format_unit=format_unit, decimal=decimal, show_decimal_unit=show_decimal_unit)
assert fl.formatter([15.392231] * u.degree, spacing, format='ascii')[0] == ascii
assert fl.formatter([15.392231] * u.degree, spacing, format='latex')[0] == latex
def test_incompatible_unit_decimal(self):
with pytest.raises(UnitsError) as exc:
AngleFormatterLocator(unit=u.arcmin, decimal=False)
assert exc.value.args[0] == 'Units should be degrees or hours when using non-decimal (sexagesimal) mode'
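# Illustrative sketch (an assumption, not the library's implementation): the
# base spacings asserted in TestAngleFormatterLocator.test_base_spacing above
# are consistent with taking the unit of the smallest sexagesimal field,
# dividing by 10 per decimal digit, and scaling hour-based formats by 15.
def _expected_base_spacing(fmt):
    main, _, decimals = fmt.partition('.')
    if main in ('d', 'm', 's'):
        # Single-field decimal formats use their unit directly.
        unit = {'d': u.deg, 'm': u.arcmin, 's': u.arcsec}[main]
        factor = 1.
    else:
        # One, two, or three colon-separated fields map to deg/arcmin/arcsec;
        # hour-based formats are 15x their degree counterparts.
        unit = {1: u.deg, 2: u.arcmin, 3: u.arcsec}[len(main.split(':'))]
        factor = 15. if main.startswith('h') else 1.
    return factor * unit / 10 ** len(decimals)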
class TestScalarFormatterLocator:
def test_no_options(self):
fl = ScalarFormatterLocator(unit=u.m)
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, number=5)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(number=5, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, number=5, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
def test_values(self):
fl = ScalarFormatterLocator(values=[0.1, 1., 14.] * u.m, unit=u.m)
assert fl.values.value.tolist() == [0.1, 1., 14.]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [0.1, 1., 14.])
def test_number(self):
fl = ScalarFormatterLocator(number=7, unit=u.m)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, np.linspace(36., 54., 10))
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, np.linspace(34.4, 36, 9))
fl.format = 'x'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35., 36.])
def test_spacing(self):
fl = ScalarFormatterLocator(spacing=3. * u.m)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3. * u.m
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [36., 39., 42., 45., 48., 51., 54.])
fl.spacing = 0.5 * u.m
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [34.5, 35., 35.5, 36.])
with pytest.warns(UserWarning, match='Spacing is too small'):
fl.format = 'x'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35., 36.])
def test_minor_locator(self):
fl = ScalarFormatterLocator(unit=u.m)
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(minor_values.value, [36., 37., 38., 39., 41., 42.,
43., 44., 46., 47., 48., 49., 51., 52., 53., 54.])
print('minor_values: ' + str(minor_values))
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1., 14.] * u.m
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [])
@pytest.mark.parametrize(('format', 'string'), [('x', '15'),
('x.x', '15.4'),
('x.xx', '15.39'),
('x.xxx', '15.392'),
('%g', '15.3922'),
('%f', '15.392231'),
('%.2f', '15.39'),
('%.3f', '15.392')])
def test_format(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(('format', 'string'), [('x', '1539'),
('x.x', '1539.2'),
('x.xx', '1539.22'),
('x.xxx', '1539.223')])
def test_format_unit(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
fl.format_unit = u.cm
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(('format'), ['dd', 'dd:mm', 'xx:mm', 'mx.xxx'])
def test_invalid_formats(self, format):
fl = ScalarFormatterLocator(number=5, unit=u.m)
with pytest.raises(ValueError) as exc:
fl.format = format
assert exc.value.args[0] == "Invalid format: " + format
@pytest.mark.parametrize(('format', 'base_spacing'), [('x', 1. * u.m),
('x.x', 0.1 * u.m),
('x.xxx', 0.001 * u.m)])
def test_base_spacing(self, format, base_spacing):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = ScalarFormatterLocator(unit=u.m)
fl.spacing = 0.032 * u.m
with pytest.warns(UserWarning, match='Spacing is not a multiple of base spacing'):
fl.format = 'x.xx'
assert_almost_equal(fl.spacing.to_value(u.m), 0.03)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[1000., 1200., 1400., 1600., 1800., 2000.] * u.cm)
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
fl.format = 'x.x'
assert_quantity_allclose(fl.locator(1, 19)[0], [10] * u.cm)
|
744053dc5dbf7dc700ca879a75f476588dd7817f02b21ed87add8df479c4f050 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.visualization.wcsaxes.core import WCSAxes
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent
from astropy.wcs import WCS
from astropy.coordinates import FK5
from astropy.time import Time
from astropy.tests.image_tests import ignore_matplotlibrc
from .test_images import BaseImageTests
class TestDisplayWorldCoordinate(BaseImageTests):
@ignore_matplotlibrc
def test_overlay_coords(self, tmpdir):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test1.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '0\xb029\'45" -0\xb029\'20" (world)'
# Test pixel coordinates
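        # Pressing 'w' switches the coordinates reported by
        # _display_world_coords between the available systems (world, pixel,
        # and any overlays).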
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
event3 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event3.key, guiEvent=event3)
# Test that it still displays world coords when there are no overlay coords
string_world2 = ax._display_world_coords(0.523412, 0.518311)
assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)'
overlay = ax.get_coords_overlay('fk5')
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test2.png').strpath)
event4 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event4.key, guiEvent=event4)
# Test that it displays the overlay world coordinates
string_world3 = ax._display_world_coords(0.523412, 0.518311)
assert string_world3 == '267.176\xb0 -28\xb045\'56" (world, overlay 1)'
overlay = ax.get_coords_overlay(FK5())
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test3.png').strpath)
event5 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event5.key, guiEvent=event5)
# Test that it displays the overlay world coordinates
string_world4 = ax._display_world_coords(0.523412, 0.518311)
assert string_world4 == '267.176\xb0 -28\xb045\'56" (world, overlay 2)'
overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test4.png').strpath)
event6 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event6.key, guiEvent=event6)
# Test that it displays the overlay world coordinates
string_world5 = ax._display_world_coords(0.523412, 0.518311)
assert string_world5 == '267.652\xb0 -28\xb046\'23" (world, overlay 3)'
@ignore_matplotlibrc
def test_cube_coords(self, tmpdir):
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x'))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '2563 3h26m52.0s (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
|
91e4d3421e9a48c8f0ff8ea2684fc435707bcd5529460906897779de75290e6e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from unittest.mock import patch
import pytest
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.io import fits
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy import units as u
from astropy.tests.image_tests import ignore_matplotlibrc
ROOT = os.path.dirname(__file__)
MSX_HEADER = fits.Header.fromtextfile(os.path.join(ROOT, 'data', 'msx_header'))
@ignore_matplotlibrc
def test_getaxislabel():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
ax.coords[0].set_axislabel("X")
ax.coords[1].set_axislabel("Y")
assert ax.coords[0].get_axislabel() == "X"
assert ax.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
return ax
def assert_label_draw(ax, x_label, y_label):
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
with patch.object(ax.coords[0].axislabels, 'set_position') as pos1:
with patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
ax.figure.canvas.draw()
assert pos1.call_count == x_label
assert pos2.call_count == y_label
@ignore_matplotlibrc
def test_label_visibility_rules_default(ax):
assert_label_draw(ax, True, True)
@ignore_matplotlibrc
def test_label_visibility_rules_label(ax):
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
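    # A single tick at -9999 lies far outside the axes limits, so coordinate
    # 1 gets no visible ticks (and therefore no tick labels).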
assert_label_draw(ax, False, False)
@ignore_matplotlibrc
def test_label_visibility_rules_ticks(ax):
ax.coords[0].set_axislabel_visibility_rule('ticks')
ax.coords[1].set_axislabel_visibility_rule('ticks')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, False)
@ignore_matplotlibrc
def test_label_visibility_rules_always(ax):
ax.coords[0].set_axislabel_visibility_rule('always')
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, True)
def test_set_separator(tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
fig.add_axes(ax)
# Force a draw which is required for format_coord to work
ax.figure.canvas.draw()
ax.coords[1].set_format_unit('deg')
assert ax.coords[1].format_coord(4) == '4\xb000\'00\"'
ax.coords[1].set_separator((':', ':', ''))
assert ax.coords[1].format_coord(4) == '4:00:00'
ax.coords[1].set_separator('abc')
assert ax.coords[1].format_coord(4) == '4a00b00c'
ax.coords[1].set_separator(None)
assert ax.coords[1].format_coord(4) == '4\xb000\'00\"'
|
96f9c5ebaa43424a3d70ea4af1725202998842fa0b012a3825deb6ed230427be | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle
from matplotlib import rc_context
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy.visualization.wcsaxes.patches import SphericalCircle
from astropy.visualization.wcsaxes import WCSAxes
from . import datasets
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from astropy.visualization.wcsaxes.frame import EllipticalFrame
# See if matplotlib is a dev version (e.g., 3.0.2+2456.g28e32c6)
MPLDEV = '+' in matplotlib.__version__
class BaseImageTests:
@classmethod
def setup_class(cls):
cls._data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
msx_header = os.path.join(cls._data_dir, 'msx_header')
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = os.path.join(cls._data_dir, 'rosat_header')
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = os.path.join(cls._data_dir, '2MASS_k_header')
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = os.path.join(cls._data_dir, 'cube_header')
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = os.path.join(cls._data_dir, 'slice_header')
cls.slice_header = fits.Header.fromtextfile(slice_header)
class TestBasic(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=1.5, style={})
@pytest.mark.parametrize('axisbelow', [True, False, 'line'])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
ax.grid()
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30., 50.), 60., 50., facecolor='green', edgecolor='red')
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_contour_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(hdu_msx.data, transform=ax.get_transform(wcs_msx),
colors='orange', levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_contourf_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(hdu_msx.data, transform=ax.get_transform(wcs_msx),
levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.25, 0.25, 0.65, 0.65],
projection=WCS(self.msx_header), aspect='equal')
# Change the format of the ticks
ax.coords[0].set_major_formatter('dd:mm:ss')
ax.coords[1].set_major_formatter('dd:mm:ss.ssss')
# Overlay grid on image
ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed')
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords['glon'].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords['glat'].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords['glon'].set_axislabel('Galactic Longitude', minpad=1.6)
ax.coords['glat'].set_axislabel('Galactic Latitude', minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color('red')
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == 'red'
assert ax.coords.frame.get_linewidth() == 2
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.rosat_header), aspect='equal')
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color='black', alpha=1.0, lw=1, linestyle='dashed')
p = Circle((300, 100), radius=40, ec='yellow', fc='none')
ax.add_patch(p)
p = Circle((30., 20.), radius=20., ec='orange', fc='none',
transform=ax.get_transform('world'))
ax.add_patch(p)
p = Circle((60., 50.), radius=20., ec='red', fc='none',
transform=ax.get_transform('fk5'))
ax.add_patch(p)
p = Circle((40., 60.), radius=20., ec='green', fc='none',
transform=ax.get_transform('galactic'))
ax.add_patch(p)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel('Velocity m/s')
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
ax.coords[2].grid(grid_type='contours', color='red', linestyle='solid')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=('x', 'y', 50), aspect='equal')
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type='contours', color='blue', linestyle='solid')
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
ax.plot_coord(c, 'o')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_major_formatter('x.xx')
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel('Velocity km/s')
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color='blue', alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color='red', alpha=0.9, width=1)
ax.coords[0].set_ticks_position('all')
ax.coords[1].set_ticks_position('all')
ax.coords[0].set_axislabel('X-axis', size=20)
ax.coords[1].set_axislabel('Y-axis', color='green', size=25,
weight='regular', style='normal',
family='cmtt10')
ax.coords[0].set_axislabel_position('t')
ax.coords[1].set_axislabel_position('r')
ax.coords[0].set_ticklabel(color='purple', size=15, alpha=1,
weight='light', style='normal',
family='cmss10')
ax.coords[1].set_ticklabel(color='black', size=18, alpha=0.9,
weight='bold', family='cmr10')
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('r')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_rcparams(self):
# Test custom rcParams
with rc_context({
'axes.labelcolor': 'purple',
'axes.labelsize': 14,
'axes.labelweight': 'bold',
'axes.linewidth': 3,
'axes.facecolor': '0.5',
'axes.edgecolor': 'green',
'xtick.color': 'red',
'xtick.labelsize': 8,
'xtick.direction': 'in',
'xtick.minor.visible': True,
'xtick.minor.size': 5,
'xtick.major.size': 20,
'xtick.major.width': 3,
'xtick.major.pad': 10,
'grid.color': 'blue',
'grid.linestyle': ':',
'grid.linewidth': 1,
'grid.alpha': 0.5}):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel('X label')
ax.set_ylabel('Y label')
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6],
projection=WCS(self.msx_header),
aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type('scalar')
ax.coords[1].set_coord_type('scalar')
ax.coords[0].set_major_formatter('x.xxx')
ax.coords[1].set_major_formatter('x.xxx')
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5],
projection=wcs, aspect='auto')
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('all')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_axislabels_regression(self):
if MPLDEV:
pytest.xfail('https://github.com/astropy/astropy/issues/8678')
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto')
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[1].ticklabels.set_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_noncelestial_angular(self, tmpdir):
# Regression test for a bug that meant that when passing a WCS that had
# angular axes and using set_coord_type to set the coordinates to
# longitude/latitude, but where the WCS wasn't recognized as celestial,
# the WCS units are not converted to deg, so we can't assume that
# transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['solar-x', 'solar-y']
wcs.wcs.cunit = ['arcsec', 'arcsec']
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin='lower')
ax.coords[0].set_coord_type('longitude', coord_wrap=180)
ax.coords[1].set_coord_type('latitude')
ax.coords[0].set_major_formatter('s.s')
ax.coords[1].set_major_formatter('s.s')
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color='white', ls='solid')
# Force drawing (needed for format_coord)
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(512, 512) == '513.0 513.0 (world)'
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_patches_distortion(self, tmpdir):
if MPLDEV:
pytest.xfail('https://github.com/astropy/astropy/issues/8678')
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='equal')
# Pixel coordinates
r = Rectangle((30., 50.), 60., 50., edgecolor='green', facecolor='none')
ax.add_patch(r)
# FK5 coordinates
r = Rectangle((266.4, -28.9), 0.3, 0.3, edgecolor='cyan', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
# FK5 coordinates
c = Circle((266.4, -29.1), 0.15, edgecolor='magenta', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(c)
# Pixel coordinates
ax.scatter([40, 100, 130], [30, 130, 60], s=100, edgecolor='red', facecolor=(1, 0, 0, 0.5))
# World coordinates (should not be distorted)
ax.scatter(266.78238, -28.769255, transform=ax.get_transform('fk5'), s=300,
edgecolor='red', facecolor='none')
# World coordinates (should not be distorted)
r = SphericalCircle((266.4 * u.deg, -29.1 * u.deg), 0.15 * u.degree,
edgecolor='purple', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={'text.usetex': True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_tick_params(self):
# This is a test to make sure that tick_params works correctly. We try
# and test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ['lon', 'lat']
fig = plt.figure(figsize=(6, 6))
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
plt.tick_params(direction='in', length=20, width=5, pad=6, labelsize=6,
color='red', labelcolor='blue')
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
plt.tick_params(axis=0, direction='in', length=20, width=5, pad=4, labelsize=6,
color='red', labelcolor='blue', bottom=True, grid_color='purple')
plt.tick_params(axis='lat', direction='out', labelsize=8,
color='blue', labelcolor='purple', left=True, right=True,
grid_color='red')
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(axis='x', direction='in', length=20, width=5, pad=20, labelsize=6,
color='red', labelcolor='blue', bottom=True,
grid_color='purple')
ax.tick_params(axis='y', direction='out', labelsize=8,
color='blue', labelcolor='purple', left=True, right=True,
grid_color='red')
plt.grid()
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(length=4, pad=2, colors='orange', labelbottom=True,
labeltop=True, labelsize=10)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which='minor', length=6)
return fig
|
ab1aa808369067db626643556aef80b49aa8d6a7214bd3dcfbeb3ac7543b4b19 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy.wcs import WCS
from astropy.visualization.wcsaxes.transforms import WCSWorld2PixelTransform
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ['x', 'y']
WCS2D.wcs.cunit = ['km', 'km']
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0., 0.]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ['x', 'y', 'z']
WCS3D.wcs.cunit = ['km', 'km', 'km']
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0., 0., 1.]
def test_shorthand_inversion():
"""Test that the Matplotlib subtraction shorthand for composing and
inverting transformations works."""
w1 = WCS(naxis=2)
w1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ['RA---SIN', 'DEC--SIN']
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We add Affine2D to catch the fact that in Matplotlib, composing with a
# CompositeTransform can impose stricter requirements on the dimensionality.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 3))
w1 = WCSWorld2PixelTransform(WCS3D, slice=('y', 0, 'x')) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 2], world_2[:, 2])
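# Illustrative sketch (not part of the original test module): matplotlib's
# subtraction shorthand composes one transform with the inverse of another,
# i.e. ``a - b`` behaves like ``a + b.inverted()``, so ``t - t`` should
# round-trip any set of points.
def test_shorthand_roundtrip_affine():
    t = Affine2D().scale(2.0).translate(1.0, -1.0)
    pts = np.array([[1.0, 2.0], [3.0, 4.0]])
    np.testing.assert_allclose((t - t).transform(pts), pts)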
|
167729acf0240b6ef224475ae124c16aae5b39a0cf196f0208296f27d290b2f1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Slicing mixin to the NDData class.
from astropy import log
__all__ = ['NDSlicingMixin']
class NDSlicingMixin:
"""Mixin to provide slicing on objects using the `NDData`
interface.
The ``data``, ``mask``, ``uncertainty`` and ``wcs`` will be sliced, if
set and sliceable. The ``unit`` and ``meta`` will be untouched. The return
will be a reference and not a copy, if possible.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDSlicingMixin
>>> class NDDataSliceable(NDSlicingMixin, NDData):
... pass
Slicing an instance containing data::
>>> nd = NDDataSliceable([1,2,3,4,5])
>>> nd[1:3]
NDDataSliceable([2, 3])
    The other attributes are also sliced, for example the ``mask``::
>>> import numpy as np
>>> mask = np.array([True, False, True, True, False])
>>> nd2 = NDDataSliceable(nd, mask=mask)
>>> nd2slc = nd2[1:3]
>>> nd2slc[nd2slc.mask]
NDDataSliceable([3])
Be aware that changing values of the sliced instance will change the values
of the original::
>>> nd3 = nd2[1:3]
>>> nd3.data[0] = 100
>>> nd2
NDDataSliceable([ 1, 100, 3, 4, 5])
See also
--------
NDDataRef
NDDataArray
"""
def __getitem__(self, item):
# Abort slicing if the data is a single scalar.
if self.data.shape == ():
raise TypeError('scalars cannot be sliced.')
# Let the other methods handle slicing.
kwargs = self._slice(item)
return self.__class__(**kwargs)
def _slice(self, item):
"""Collects the sliced attributes and passes them back as `dict`.
It passes uncertainty, mask and wcs to their appropriate ``_slice_*``
method, while ``meta`` and ``unit`` are simply taken from the original.
The data is assumed to be sliceable and is sliced directly.
When possible the return should *not* be a copy of the data but a
reference.
Parameters
----------
item : slice
The slice passed to ``__getitem__``.
Returns
-------
dict :
            Containing all the attributes after slicing - ready to be used
            to create a new instance via ``self.__class__(**kwargs)`` in
            ``__getitem__``.
"""
kwargs = {}
kwargs['data'] = self.data[item]
# Try to slice some attributes
kwargs['uncertainty'] = self._slice_uncertainty(item)
kwargs['mask'] = self._slice_mask(item)
kwargs['wcs'] = self._slice_wcs(item)
# Attributes which are copied and not intended to be sliced
kwargs['unit'] = self.unit
kwargs['meta'] = self.meta
return kwargs
def _slice_uncertainty(self, item):
if self.uncertainty is None:
return None
try:
return self.uncertainty[item]
except TypeError:
# Catching TypeError in case the object has no __getitem__ method.
# But let IndexError raise.
log.info("uncertainty cannot be sliced.")
return self.uncertainty
def _slice_mask(self, item):
if self.mask is None:
return None
try:
return self.mask[item]
except TypeError:
log.info("mask cannot be sliced.")
return self.mask
def _slice_wcs(self, item):
if self.wcs is None:
return None
try:
return self.wcs[item]
except TypeError:
log.info("wcs cannot be sliced.")
return self.wcs
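# Illustrative sketch (an assumption, not part of the original module): a
# subclass can extend ``_slice`` so that an extra array-valued attribute --
# here a hypothetical ``flags`` array -- is sliced alongside ``data``.
def _example_slicing_with_flags():
    import numpy as np
    from astropy.nddata import NDData
    class NDDataWithFlags(NDSlicingMixin, NDData):
        def __init__(self, *args, flags=None, **kwargs):
            super().__init__(*args, **kwargs)
            self.flags = flags
        def _slice(self, item):
            kwargs = super()._slice(item)
            kwargs['flags'] = None if self.flags is None else self.flags[item]
            return kwargs
    nd = NDDataWithFlags([1, 2, 3, 4], flags=np.array([0, 1, 0, 1]))
    assert list(nd[1:3].flags) == [1, 0]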
|
cb7fa539951671582c602927dd670a4bd0482824ab7f20b015407202b64730eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Arithmetic mixin to the NDData class.
from copy import deepcopy
import numpy as np
from astropy.nddata.nduncertainty import NDUncertainty
from astropy.units import dimensionless_unscaled
from astropy.utils import format_doc, sharedmethod
__all__ = ['NDArithmeticMixin']
# Global so it doesn't pollute the class dict unnecessarily:
# Docstring templates for add, subtract, multiply, divide methods.
_arit_doc = """
Performs {name} by evaluating ``self`` {op} ``operand``.
Parameters
----------
operand, operand2 : `NDData`-like instance or convertible to one.
If ``operand2`` is ``None`` or not given it will perform the operation
``self`` {op} ``operand``.
If ``operand2`` is given it will perform ``operand`` {op} ``operand2``.
If the method was called on a class rather than on the instance
``operand2`` must be given.
propagate_uncertainties : `bool` or ``None``, optional
If ``None`` the result will have no uncertainty. If ``False`` the
result will have a copied version of the first operand that has an
uncertainty. If ``True`` the result will have a correctly propagated
uncertainty from the uncertainties of the operands but this assumes
that the uncertainties are `NDUncertainty`-like. Default is ``True``.
.. versionchanged:: 1.2
This parameter must be given as keyword-parameter. Using it as
positional parameter is deprecated.
``None`` was added as valid parameter value.
handle_mask : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no mask. If ``'first_found'`` the
result will have a copied version of the first operand that has a
        mask. If it is a callable then the specified callable must
create the results ``mask`` and if necessary provide a copy.
Default is `numpy.logical_or`.
.. versionadded:: 1.2
handle_meta : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no meta. If ``'first_found'`` the
result will have a copied version of the first operand that has a
(not empty) meta. If it is a callable then the specified callable must
create the results ``meta`` and if necessary provide a copy.
Default is ``None``.
.. versionadded:: 1.2
compare_wcs : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no wcs and no comparison between
the wcs of the operands is made. If ``'first_found'`` the
result will have a copied version of the first operand that has a
        wcs. If it is a callable then the specified callable must
        compare the ``wcs``. If the comparison succeeds the result will
        take the ``wcs`` of the first operand, otherwise a ``ValueError``
        is raised. Default is ``'first_found'``.
.. versionadded:: 1.2
uncertainty_correlation : number or `~numpy.ndarray`, optional
The correlation between the two operands is used for correct error
propagation for correlated data as given in:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
Default is 0.
.. versionadded:: 1.2
kwargs :
Any other parameter that should be passed to the callables used.
Returns
-------
result : `~astropy.nddata.NDData`-like
The resulting dataset
Notes
-----
If a ``callable`` is used for ``mask``, ``wcs`` or ``meta`` the
callable must accept the corresponding attributes as first two
    parameters. If the callable needs additional parameters these can be
    passed as ``kwargs`` prefixed with ``"mask_"``, ``"wcs_"`` or
    ``"meta_"``, depending on which callable they are meant for. This
    prefix is removed before the callable is called.
``"first_found"`` can also be abbreviated with ``"ff"``.
"""
class NDArithmeticMixin:
"""
Mixin class to add arithmetic to an NDData object.
When subclassing, be sure to list the superclasses in the correct order
so that the subclass sees NDData as the main superclass. See
`~astropy.nddata.NDDataArray` for an example.
Notes
-----
This class only aims at covering the most common cases so there are certain
restrictions on the saved attributes::
- ``uncertainty`` : has to be something that has a `NDUncertainty`-like
interface for uncertainty propagation
- ``mask`` : has to be something that can be used by a bitwise ``or``
operation.
        - ``wcs`` : has to implement a way of comparing with ``==`` to allow
          the operation.
    But there is a workaround that allows disabling the handling of a
    specific attribute and either setting the result's attribute to
    ``None`` or copying it from one operand (ignoring the other).
For example for uncertainties not representing an `NDUncertainty`-like
interface you can alter the ``propagate_uncertainties`` parameter in
:meth:`NDArithmeticMixin.add`. ``None`` means that the result will have no
uncertainty, ``False`` means it takes the uncertainty of the first operand
(if this does not exist from the second operand) as the result's
uncertainty. This behavior is also explained in the docstring for the
different arithmetic operations.
    Decomposing the units is not attempted, mainly due to the internal
    mechanics of `~astropy.units.Quantity`, so the resulting data might have
    units like ``km/m`` if you divide, for example, 100 km by 5 m.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDArithmeticMixin
>>> class NDDataWithMath(NDArithmeticMixin, NDData):
... pass
Using it with one operand on an instance::
>>> ndd = NDDataWithMath(100)
>>> ndd.add(20)
NDDataWithMath(120)
    Using it with two operands on an instance::
>>> ndd = NDDataWithMath(-4)
>>> ndd.divide(1, ndd)
NDDataWithMath(-0.25)
    Using it as a classmethod requires two operands::
>>> NDDataWithMath.subtract(5, 4)
NDDataWithMath(1)
"""
def _arithmetic(self, operation, operand,
propagate_uncertainties=True, handle_mask=np.logical_or,
handle_meta=None, uncertainty_correlation=0,
compare_wcs='first_found', **kwds):
"""
Base method which calculates the result of the arithmetic operation.
This method determines the result of the arithmetic operation on the
``data`` including their units and then forwards to other methods
to calculate the other properties for the result (like uncertainty).
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide`.
operand : same type (class) as self
see :meth:`NDArithmeticMixin.add`
propagate_uncertainties : `bool` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_mask : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_meta : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
compare_wcs : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
uncertainty_correlation : ``Number`` or `~numpy.ndarray`, optional
see :meth:`NDArithmeticMixin.add`
kwargs :
Any other parameter that should be passed to the
different :meth:`NDArithmeticMixin._arithmetic_mask` (or wcs, ...)
methods.
Returns
-------
result : `~numpy.ndarray` or `~astropy.units.Quantity`
The resulting data as array (in case both operands were without
unit) or as quantity if at least one had a unit.
kwargs : `dict`
The kwargs should contain all the other attributes (besides data
and unit) needed to create a new instance for the result. Creating
the new instance is up to the calling method, for example
:meth:`NDArithmeticMixin.add`.
"""
# Find the appropriate keywords for the appropriate method (not sure
# if data and uncertainty are ever used ...)
kwds2 = {'mask': {}, 'meta': {}, 'wcs': {},
'data': {}, 'uncertainty': {}}
for i in kwds:
splitted = i.split('_', 1)
try:
kwds2[splitted[0]][splitted[1]] = kwds[i]
except KeyError:
raise KeyError('Unknown prefix {0} for parameter {1}'
''.format(splitted[0], i))
kwargs = {}
# First check that the WCS allows the arithmetic operation
if compare_wcs is None:
kwargs['wcs'] = None
elif compare_wcs in ['ff', 'first_found']:
if self.wcs is None:
kwargs['wcs'] = deepcopy(operand.wcs)
else:
kwargs['wcs'] = deepcopy(self.wcs)
else:
kwargs['wcs'] = self._arithmetic_wcs(operation, operand,
compare_wcs, **kwds2['wcs'])
        # Then calculate the resulting data (which can, but does not need
        # to, be a quantity)
result = self._arithmetic_data(operation, operand, **kwds2['data'])
# Determine the other properties
if propagate_uncertainties is None:
kwargs['uncertainty'] = None
elif not propagate_uncertainties:
if self.uncertainty is None:
kwargs['uncertainty'] = deepcopy(operand.uncertainty)
else:
kwargs['uncertainty'] = deepcopy(self.uncertainty)
else:
kwargs['uncertainty'] = self._arithmetic_uncertainty(
operation, operand, result, uncertainty_correlation,
**kwds2['uncertainty'])
if handle_mask is None:
kwargs['mask'] = None
elif handle_mask in ['ff', 'first_found']:
if self.mask is None:
kwargs['mask'] = deepcopy(operand.mask)
else:
kwargs['mask'] = deepcopy(self.mask)
else:
kwargs['mask'] = self._arithmetic_mask(operation, operand,
handle_mask,
**kwds2['mask'])
if handle_meta is None:
kwargs['meta'] = None
elif handle_meta in ['ff', 'first_found']:
if not self.meta:
kwargs['meta'] = deepcopy(operand.meta)
else:
kwargs['meta'] = deepcopy(self.meta)
else:
kwargs['meta'] = self._arithmetic_meta(
operation, operand, handle_meta, **kwds2['meta'])
        # Return the result and the kwargs; the calling method wraps them
        # into a new instance of the same class.
return result, kwargs
def _arithmetic_data(self, operation, operand, **kwds):
"""
Calculate the resulting data
Parameters
----------
operation : callable
see `NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
kwds :
Additional parameters.
Returns
-------
result_data : `~numpy.ndarray` or `~astropy.units.Quantity`
If both operands had no unit the resulting data is a simple numpy
array, but if any of the operands had a unit the return is a
Quantity.
"""
# Do the calculation with or without units
if self.unit is None and operand.unit is None:
result = operation(self.data, operand.data)
elif self.unit is None:
result = operation(self.data * dimensionless_unscaled,
operand.data * operand.unit)
elif operand.unit is None:
result = operation(self.data * self.unit,
operand.data * dimensionless_unscaled)
else:
result = operation(self.data * self.unit,
operand.data * operand.unit)
return result
def _arithmetic_uncertainty(self, operation, operand, result, correlation,
**kwds):
"""
Calculate the resulting uncertainty.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
result : `~astropy.units.Quantity` or `~numpy.ndarray`
The result of :meth:`NDArithmeticMixin._arithmetic_data`.
correlation : number or `~numpy.ndarray`
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters.
Returns
-------
result_uncertainty : `NDUncertainty` subclass instance or None
The resulting uncertainty already saved in the same `NDUncertainty`
subclass that ``self`` had (or ``operand`` if self had no
uncertainty). ``None`` only if both had no uncertainty.
"""
# Make sure these uncertainties are NDUncertainties so this kind of
# propagation is possible.
if (self.uncertainty is not None and
not isinstance(self.uncertainty, NDUncertainty)):
raise TypeError("Uncertainty propagation is only defined for "
"subclasses of NDUncertainty.")
if (operand.uncertainty is not None and
not isinstance(operand.uncertainty, NDUncertainty)):
raise TypeError("Uncertainty propagation is only defined for "
"subclasses of NDUncertainty.")
# Now do the uncertainty propagation
# TODO: There is no enforced requirement that actually forbids the
# uncertainty to have negative entries but with correlation the
# sign of the uncertainty DOES matter.
if self.uncertainty is None and operand.uncertainty is None:
# Neither has uncertainties so the result should have none.
return None
elif self.uncertainty is None:
# Create a temporary uncertainty to allow uncertainty propagation
# to yield the correct results. (issue #4152)
self.uncertainty = operand.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(operation, operand,
result, correlation)
# Delete the temporary uncertainty again.
self.uncertainty = None
return result_uncert
elif operand.uncertainty is None:
# As with self.uncertainty is None but the other way around.
operand.uncertainty = self.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(operation, operand,
result, correlation)
operand.uncertainty = None
return result_uncert
else:
# Both have uncertainties so just propagate.
return self.uncertainty.propagate(operation, operand, result,
correlation)
def _arithmetic_mask(self, operation, operand, handle_mask, **kwds):
"""
Calculate the resulting mask
        This is implemented as the element-wise ``or`` operation if both
        have a mask.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_mask : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_mask``.
Returns
-------
result_mask : any type
If only one mask was present this mask is returned.
If neither had a mask ``None`` is returned. Otherwise
``handle_mask`` must create (and copy) the returned mask.
"""
# If only one mask is present we need not bother about any type checks
if self.mask is None and operand.mask is None:
return None
elif self.mask is None:
# Make a copy so there is no reference in the result.
return deepcopy(operand.mask)
elif operand.mask is None:
return deepcopy(self.mask)
else:
            # Now let's calculate the resulting mask (operation enforces copy)
return handle_mask(self.mask, operand.mask, **kwds)
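    # Illustrative example (not part of the original module): any callable
    # accepting two masks can be passed as ``handle_mask``, e.g. requiring
    # both operands to flag a pixel before the result does::
    #
    #     ndd1.add(ndd2, handle_mask=np.logical_and)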
def _arithmetic_wcs(self, operation, operand, compare_wcs, **kwds):
"""
Calculate the resulting wcs.
        There is actually no calculation involved but it is a good place to
        compare the wcs information of both operands. This comparison is
        currently not working properly with `~astropy.wcs.WCS` (which is the
        suggested class for storing as wcs property) but it will not break
        it either.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData` instance or subclass
The second operand wrapped in an instance of the same class as
self.
compare_wcs : callable
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters given to ``compare_wcs``.
Raises
------
ValueError
If ``compare_wcs`` returns ``False``.
Returns
-------
result_wcs : any type
The ``wcs`` of the first operand is returned.
"""
        # OK, not really arithmetic, but we need to check which wcs makes
        # sense for the result, and this is an ideal place to compare the
        # two WCS. Assume the comparison returns None or False when they
        # are not equal.
if not compare_wcs(self.wcs, operand.wcs, **kwds):
raise ValueError("WCS are not equal.")
return self.wcs
def _arithmetic_meta(self, operation, operand, handle_meta, **kwds):
"""
Calculate the resulting meta.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_meta : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_meta``.
Returns
-------
result_meta : any type
The result of ``handle_meta``.
"""
# Just return what handle_meta does with both of the metas.
return handle_meta(self.meta, operand.meta, **kwds)
@sharedmethod
@format_doc(_arit_doc, name='addition', op='+')
def add(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.add, operand, operand2,
**kwargs)
@sharedmethod
@format_doc(_arit_doc, name='subtraction', op='-')
def subtract(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.subtract, operand, operand2,
**kwargs)
@sharedmethod
@format_doc(_arit_doc, name="multiplication", op="*")
def multiply(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.multiply, operand, operand2,
**kwargs)
@sharedmethod
@format_doc(_arit_doc, name="division", op="/")
def divide(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.true_divide, operand,
operand2, **kwargs)
@sharedmethod
def _prepare_then_do_arithmetic(self_or_cls, operation, operand, operand2,
**kwargs):
"""Intermediate method called by public arithmetics (i.e. ``add``)
before the processing method (``_arithmetic``) is invoked.
.. warning::
Do not override this method in subclasses.
This method checks if it was called as instance or as class method and
then wraps the operands and the result from ``_arithmetics`` in the
appropriate subclass.
Parameters
----------
self_or_cls : instance or class
``sharedmethod`` behaves like a normal method if called on the
instance (then this parameter is ``self``) but like a classmethod
when called on the class (then this parameter is ``cls``).
        operation : callable
The operation (normally a numpy-ufunc) that represents the
appropriate action.
operand, operand2, kwargs :
See for example ``add``.
        Returns
        -------
result : `~astropy.nddata.NDData`-like
Depending how this method was called either ``self_or_cls``
(called on class) or ``self_or_cls.__class__`` (called on instance)
is the NDData-subclass that is used as wrapper for the result.
"""
# DO NOT OVERRIDE THIS METHOD IN SUBCLASSES.
if isinstance(self_or_cls, NDArithmeticMixin):
# True means it was called on the instance, so self_or_cls is
# a reference to self
cls = self_or_cls.__class__
if operand2 is None:
# Only one operand was given. Set operand2 to operand and
# operand to self so that we call the appropriate method of the
# operand.
operand2 = operand
operand = self_or_cls
else:
# Convert the first operand to the class of this method.
                # This is important so that the correct ``_arithmetic``
                # method is called later.
operand = cls(operand)
else:
# It was used as classmethod so self_or_cls represents the cls
cls = self_or_cls
# It was called on the class so we expect two operands!
if operand2 is None:
raise TypeError("operand2 must be given when the method isn't "
"called on an instance.")
# Convert to this class. See above comment why.
operand = cls(operand)
# At this point operand, operand2, kwargs and cls are determined.
        # Let's try to convert operand2 to the class of operand to allow for
# arithmetic operations with numbers, lists, numpy arrays, numpy masked
# arrays, astropy quantities, masked quantities and of other subclasses
# of NDData.
operand2 = cls(operand2)
        # Now call the _arithmetic method to do the arithmetic.
result, init_kwds = operand._arithmetic(operation, operand2, **kwargs)
# Return a new class based on the result
return cls(result, **init_kwds)
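# Minimal usage sketch (illustrative, not part of the original module):
def _example_mask_handling():
    from astropy.nddata import NDData
    class NDDataWithMath(NDArithmeticMixin, NDData):
        pass
    ndd1 = NDDataWithMath([1.0, 2.0], mask=np.array([True, False]))
    ndd2 = NDDataWithMath([3.0, 4.0], mask=np.array([False, False]))
    # The default handle_mask=np.logical_or combines the operand masks.
    assert list(ndd1.add(ndd2).mask) == [True, False]
    # handle_mask=None drops the mask from the result entirely.
    assert ndd1.add(ndd2, handle_mask=None).mask is None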
|
71abe7a2c8678b85c49a30184e76f14e87570261adcd3cf9a9b8b57d79c8a68f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the I/O mixin to the NDData class.
from astropy.io import registry
__all__ = ['NDIOMixin']
__doctest_skip__ = ['NDDataRead', 'NDDataWrite']
class NDDataRead(registry.UnifiedReadWrite):
"""Read and parse gridded N-dimensional data and return as an NDData-derived
object.
This function provides the NDDataBase interface to the astropy unified I/O
layer. This allows easily reading a file in the supported data formats,
for example::
>>> from astropy.nddata import CCDData
>>> dat = CCDData.read('image.fits')
    Get help on the available readers for ``CCDData`` using the ``help()`` method::
>>> CCDData.read.help() # Get help reading CCDData and list supported formats
>>> CCDData.read.help('fits') # Get detailed help on CCDData FITS reader
>>> CCDData.read.list_formats() # Print list of available formats
See also:
- http://docs.astropy.org/en/stable/nddata
- http://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
format : str, optional
File format specifier.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `NDData` subclass
        NDData-based object corresponding to file contents
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, 'read')
def __call__(self, *args, **kwargs):
return registry.read(self._cls, *args, **kwargs)
class NDDataWrite(registry.UnifiedReadWrite):
"""Write this CCDData object out in the specified format.
This function provides the NDData interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.nddata import CCDData
>>> dat = CCDData(np.zeros((12, 12)), unit='adu') # 12x12 image of zeros
>>> dat.write('zeros.fits')
    Get help on the available writers for ``CCDData`` using the ``help()`` method::
>>> CCDData.write.help() # Get help writing CCDData and list supported formats
>>> CCDData.write.help('fits') # Get detailed help on CCDData FITS writer
>>> CCDData.write.list_formats() # Print list of available formats
See also:
- http://docs.astropy.org/en/stable/nddata
- http://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
format : str, optional
File format specifier.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, 'write')
def __call__(self, *args, **kwargs):
registry.write(self._instance, *args, **kwargs)
class NDIOMixin:
"""
Mixin class to connect NDData to the astropy input/output registry.
This mixin adds two methods to its subclasses, ``read`` and ``write``.
"""
read = registry.UnifiedReadWriteMethod(NDDataRead)
write = registry.UnifiedReadWriteMethod(NDDataWrite)
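# Illustrative note (not part of the original module): any NDData subclass
# gains ``read`` and ``write`` by including this mixin, e.g.::
#
#     class MyData(NDIOMixin, NDData):
#         pass
#     data = MyData.read('observation.fits', format='fits')  # hypothetical
#
# provided a reader for the class has been registered with
# ``astropy.io.registry``.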
|
e760cd38c48e845cf66ad111a38409eeffbc58d238ab0eac89a77a87301b8a8d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base CCDData class.
import textwrap
import numpy as np
import pytest
from astropy.io import fits
from astropy.nddata.nduncertainty import (
StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty,
InverseVariance)
from astropy import units as u
from astropy import log
from astropy.wcs import WCS, FITSFixedWarning
from astropy.tests.helper import catch_warnings
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
get_pkg_data_contents)
from astropy.nddata.ccddata import CCDData
from astropy.table import Table
# If additional pytest markers are defined the key in the dictionary below
# should be the name of the marker.
DEFAULTS = {
'seed': 123,
'data_size': 100,
'data_scale': 1.0,
'data_mean': 0.0
}
DEFAULT_SEED = 123
DEFAULT_DATA_SIZE = 100
DEFAULT_DATA_SCALE = 1.0
def value_from_markers(key, request):
m = request.node.get_closest_marker(key)
if m is not None:
return m.args[0]
else:
return DEFAULTS[key]
@pytest.fixture
def ccd_data(request):
"""
Return a CCDData object with units of ADU.
The size of the data array is 100x100 but can be changed using the marker
@pytest.mark.data_size(N) on the test function, where N should be the
desired dimension.
Data values are initialized to random numbers drawn from a normal
distribution with mean of 0 and scale 1.
    The scale can be changed with the marker @pytest.mark.data_scale(s) on
    the test function, where s is the desired scale.
    The mean can be changed with the marker @pytest.mark.data_mean(m) on
    the test function, where m is the desired mean.
"""
size = value_from_markers('data_size', request)
scale = value_from_markers('data_scale', request)
mean = value_from_markers('data_mean', request)
with NumpyRNGContext(DEFAULTS['seed']):
data = np.random.normal(loc=mean, size=[size, size], scale=scale)
fake_meta = {'my_key': 42, 'your_key': 'not 42'}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([100, 100]))
def test_ccddata_unit_cannot_be_set_to_none(ccd_data):
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError) as exc:
CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
assert "can't have both header and meta." in str(exc)
@pytest.mark.data_size(10)
def test_ccddata_simple(ccd_data):
assert ccd_data.shape == (10, 10)
assert ccd_data.size == 100
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros((10, 10)), unit="electron")
assert ccd.unit is u.electron
@pytest.mark.data_size(10)
def test_initialize_from_FITS(ccd_data, tmpdir):
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (10, 10)
assert cd.size == 100
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = u.adu.to_string()
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = 'ADU'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'definetely-not-a-unit'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
with catch_warnings(FITSFixedWarning) as w:
ccd = CCDData.read(filename, unit='adu')
assert len(w) == 0
    # check that the data was read from the image extension
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmpdir):
fake_img1 = np.random.random(size=(100, 100))
fake_img2 = np.random.random(size=(100, 100))
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1)
hdu2 = fits.ImageHDU(fake_img2)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit='adu')
    # check that the data was read from the requested (second) extension
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu(ccd_data, tmpdir):
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert 'bunit' in hdulist[0].header
assert hdulist[0].header['bunit'] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit,
do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(ccd_data, tmpdir):
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_meta_is_case_sensitive(ccd_data):
key = 'SoMeKEY'
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header(ccd_data):
ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(ccd_data, tmpdir):
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
    # by default, we read from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader(ccd_data):
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromdict():
dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
def test_header2meta():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromstring_fail():
hdr = 'this is not a valid header'
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error(ccd_data):
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array(ccd_data):
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error(ccd_data):
with pytest.raises(ValueError):
ccd_data.uncertainty = np.random.random(size=(3, 4))
def test_to_hdu(ccd_data):
ccd_data.meta = {'observer': 'Edwin Hubble'}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_copy(ccd_data):
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize('operation,affects_uncertainty', [
("multiply", True),
("divide", True),
])
@pytest.mark.parametrize('operand', [
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
@pytest.mark.data_unit(u.adu)
def test_mult_div_overload(ccd_data, operand, with_uncertainty,
operation, affects_uncertainty):
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = ccd_data.__getattribute__(operation)
np_method = np.__getattribute__(operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize('operation,affects_uncertainty', [
("add", False),
("subtract", False),
])
@pytest.mark.parametrize('operand,expect_failure', [
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
@pytest.mark.data_unit(u.adu)
def test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty,
operation, affects_uncertainty):
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = ccd_data.__getattribute__(operation)
np_method = np.__getattribute__(operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert (result.unit == ccd_data.unit and result.unit == operand.unit)
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails(ccd_data):
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit='')
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_diff_smaller_3(first, second):
return abs(first - second) <= 3
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5)
assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
def test_arithmetic_with_wcs_compare_fail():
def return_diff_smaller_1(first, second):
return abs(first - second) <= 1
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5)
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs
def test_arithmetic_overload_ccddata_operand(ccd_data):
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
2 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
0 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
ccd_data.data ** 2)
expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
np.ones_like(ccd_data.data))
expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu = fits.PrimaryHDU(fake_img)
hdu.header['history'] = 'one'
hdu.header['history'] = 'two'
hdu.header['history'] = 'three'
assert len(hdu.header['history']) == 3
tmp_file = tmpdir.join('temp.fits').strpath
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header['history'] == hdu.header['history']
def test_info_logged_if_unit_in_fits_header(ccd_data, tmpdir):
tmpfile = tmpdir.join('temp.fits')
ccd_data.write(tmpfile.strpath)
log.setLevel('INFO')
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(ccd_data, tmpdir):
"""
    Check that the WCS attribute gets added to the header and that, if a
    CCDData object is created from a FITS file with a header and its WCS
    attribute is modified, then when the CCDData object is turned back into
    an HDU the WCS object overwrites the old WCS information in the header.
"""
tmpfile = tmpdir.join('temp.fits')
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile.strpath)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile.strpath)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ['', 'COMMENT', 'HISTORY']:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
    # Now check that if the WCS of a CCDData is modified and the CCDData is
    # converted to an HDU, the WCS keywords in the header are overwritten
    # with the appropriate keywords from the WCS.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename('data/sip-wcs.fits')
ccd = CCDData.read(data_file)
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits')
ccd = CCDData.read(data_file1, unit='count')
def test_wcs_SIP_coefficient_keywords_removed():
# If SIP polynomials are present, check that no more polynomial
# coefficients remain in the header. See #8598
# The SIP paper is ambiguous as to whether keywords like
# A_0_0 can appear in the header for a 2nd order or higher
# polynomial. The paper clearly says that the corrections
# are only for quadratic or higher order, so A_0_0 and the like
# should be zero if they are present, but they apparently can be
# there (or at least astrometry.net produces them).
# astropy WCS does not write those coefficients, so they were
# not being removed from the header even though they are WCS-related.
data_file = get_pkg_data_filename('data/sip-wcs.fits')
# Make sure the keywords added to this file for testing are there
hdu = fits.open(data_file)
test_keys = ['A_0_0', 'B_0_1']
for key in test_keys:
assert key in hdu[0].header
ccd = CCDData.read(data_file)
# Now the test...the two keywords above should have been removed.
for key in test_keys:
assert key not in ccd.header
def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597
"""
from astropy.nddata.ccddata import _generate_wcs_and_update_header
from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs, _PCs)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
pattern='*.hdr')
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr:
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) &
set(new_wcs_header) -
keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that the combination of new_header and new_wcs
# does not contain both PCi_j and CDi_j keywords. See #8597.
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v)
def test_read_wcs_not_creatable(tmpdir):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent('''
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
''')
with catch_warnings(FITSFixedWarning):
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n')
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = tmpdir.join('afile.fits').strpath
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit='adu')
assert ccd.wcs is None
def test_header(ccd_data):
a = {'Observer': 'Hubble'}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic(ccd_data):
ccd_data.wcs = 5
result = ccd_data.multiply(1.0)
assert result.wcs == 5
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_wcs_arithmetic_ccd(ccd_data, operation):
ccd_data2 = ccd_data.copy()
ccd_data.wcs = 5
method = ccd_data.__getattribute__(operation)
result = method(ccd_data2)
assert result.wcs == ccd_data.wcs
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename('data/sip-wcs.fits')
def check_wcs_ctypes(header):
expected_wcs_ctypes = {
'CTYPE1': 'RA---TAN-SIP',
'CTYPE2': 'DEC--TAN-SIP'
}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Converting to a header with wcs_relax=False should produce CTYPE
# keywords without the -SIP suffix.
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(ccd_data, operation):
ccd_data2 = ccd_data.copy()
ccd_data.mask = (ccd_data.data > 0)
method = ccd_data.__getattribute__(operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data.mask = ccd_data.data > 10
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_default(
ccd_data, tmpdir, uncertainty_type):
# Test that if an uncertainty is present it is saved and loaded by default.
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key(
ccd_data, tmpdir, uncertainty_type):
# Test that an uncertainty is saved and read back when a custom key is used.
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, key_uncertainty_type='Blah')
ccd_after = CCDData.read(filename, key_uncertainty_type='Blah')
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_write_read_multiextensionfits_not(ccd_data, tmpdir):
# Test that writing mask and uncertainty can be disabled
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir):
# Test writing mask, uncertainty in another extension than default
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
# Try reading with defaults extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_read_old_style_multiextensionfits(tmpdir):
# Regression test for https://github.com/astropy/ccdproc/issues/664
#
# Prior to astropy 3.1 there was no uncertainty type saved
# in the multiextension fits files generated by CCDData
# because the uncertainty had to be StdDevUncertainty.
#
# Current version should be able to read those in.
#
size = 4
# The values of the variables below are not important to the test.
data = np.zeros([size, size])
mask = data > 0.9
uncert = np.sqrt(data)
ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu')
# We'll create the file manually to ensure we have the
# right extension names and no uncertainty type.
hdulist = ccd.to_hdu()
del hdulist[2].header['UTYPE']
file_name = tmpdir.join('old_ccddata_mef.fits').strpath
hdulist.writeto(file_name)
ccd = CCDData.read(file_name)
assert isinstance(ccd.uncertainty, StdDevUncertainty)
def test_wcs(ccd_data):
ccd_data.wcs = 5
assert ccd_data.wcs == 5
def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir):
# These are the extensions that are supposed to be supported.
supported_extensions = ['fit', 'fits', 'fts']
for ext in supported_extensions:
path = tmpdir.join("test.{}".format(ext))
ccd_data.write(path.strpath)
from_disk = CCDData.read(path.strpath)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
# This test might create a Memoryleak on purpose, so the last lines after
# the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit='')
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
# https://github.com/astropy/astropy/issues/7595
def test_read_returns_image(tmpdir):
# Test that CCDData.read returns an image when reading a FITS file
# containing a table and an image, in that order.
tbl = Table(np.ones(10).reshape(5, 2))
img = np.ones((5, 5))
hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()),
fits.ImageHDU(img)])
filename = tmpdir.join('table_image.fits').strpath
hdul.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
# Expecting to get (5, 5), the size of the image
assert ccd.data.shape == (5, 5)
|
96f675b99aa449857ea6afa658694d129db0f7ea437db64c4e921b2de688f330 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.tests.helper import assert_quantity_allclose
from astropy.nddata.utils import (extract_array, add_array, subpixel_indices,
block_reduce, block_replicate,
overlap_slices, NoOverlapError, PartialOverlapError,
Cutout2D)
from astropy.wcs import WCS, Sip
from astropy.wcs.utils import proj_plane_pixel_area
from astropy.coordinates import SkyCoord
from astropy import units as u
try:
import skimage # pylint: disable=W0611
HAS_SKIMAGE = True
except ImportError:
HAS_SKIMAGE = False
test_positions = [(10.52, 3.12), (5.62, 12.97), (31.33, 31.77),
(0.46, 0.94), (20.45, 12.12), (42.24, 24.42)]
test_position_indices = [(0, 3), (0, 2), (4, 1),
(4, 2), (4, 3), (3, 4)]
test_slices = [slice(10.52, 3.12), slice(5.62, 12.97),
slice(31.33, 31.77), slice(0.46, 0.94),
slice(20.45, 12.12), slice(42.24, 24.42)]
subsampling = 5
test_pos_bad = [(-1, -4), (-1, 0), (6, 2), (6, 6)]
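# test_positions pairs element-wise with test_position_indices: for
# subsampling=5, subpixel_indices() is expected to map each (x, y)
# position to the corresponding index pair. test_pos_bad lists positions
# with no overlap for a (5, 5) array and a (2, 2) extraction shape.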
def test_slices_different_dim():
'''Overlap of arrays with different numbers of dimensions is undefined.'''
with pytest.raises(ValueError) as e:
overlap_slices((4, 5, 6), (1, 2), (0, 0))
assert "the same number of dimensions" in str(e.value)
def test_slices_pos_different_dim():
'''The position must have the same number of dimensions as the arrays.'''
with pytest.raises(ValueError) as e:
overlap_slices((4, 5), (1, 2), (0, 0, 3))
assert "the same number of dimensions" in str(e.value)
@pytest.mark.parametrize('pos', test_pos_bad)
def test_slices_no_overlap(pos):
'''If there is no overlap between arrays, an error should be raised.'''
with pytest.raises(NoOverlapError):
overlap_slices((5, 5), (2, 2), pos)
def test_slices_partial_overlap():
'''Compute a slice for partially overlapping arrays.'''
temp = overlap_slices((5,), (3,), (0,))
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
temp = overlap_slices((5,), (3,), (0,), mode='partial')
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
for pos in [0, 4]:
with pytest.raises(PartialOverlapError) as e:
temp = overlap_slices((5,), (3,), (pos,), mode='strict')
assert 'Arrays overlap only partially.' in str(e.value)
def test_slices_overlap_wrong_mode():
'''Call overlap_slices with non-existing mode.'''
with pytest.raises(ValueError) as e:
overlap_slices((5,), (3,), (0,), mode='full')
assert "Mode can be only" in str(e.value)
def test_extract_array_even_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
even-shaped extraction.
"""
data = np.arange(10)
shape = (2,)
positions_expected = [(1.49, (1, 2)), (1.5, (1, 2)), (1.501, (1, 2)),
(1.99, (1, 2)), (2.0, (1, 2)), (2.01, (2, 3)),
(2.49, (2, 3)), (2.5, (2, 3)), (2.501, (2, 3)),
(2.99, (2, 3)), (3.0, (2, 3)), (3.01, (3, 4))]
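# Each pair above is (position, values expected in the extracted array);
# the flip from (1, 2) to (2, 3) between 2.0 and 2.01 pins down where
# rounding switches for an even-sized extraction shape.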
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos, ), mode='partial')
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, 0)
exp2 = (0, 1)
expected = [exp1, ] * 6 + [exp2, ]
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos, ), mode='partial',
fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_odd_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
odd-shaped extraction.
"""
data = np.arange(10)
shape = (3,)
positions_expected = [(1.49, (0, 1, 2)), (1.5, (0, 1, 2)),
(1.501, (1, 2, 3)), (1.99, (1, 2, 3)),
(2.0, (1, 2, 3)), (2.01, (1, 2, 3)),
(2.49, (1, 2, 3)), (2.5, (1, 2, 3)),
(2.501, (2, 3, 4)), (2.99, (2, 3, 4)),
(3.0, (2, 3, 4)), (3.01, (2, 3, 4))]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos, ), mode='partial')
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, -99, 0)
exp2 = (-99, 0, 1)
expected = [exp1, ] * 3 + [exp2, ] * 4
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos, ), mode='partial',
fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_wrong_mode():
'''Call extract_array with non-existing mode.'''
with pytest.raises(ValueError) as e:
extract_array(np.arange(4), (2, ), (0, ), mode='full')
assert "Valid modes are 'partial', 'trim', and 'strict'." == str(e.value)
def test_extract_array_1d_even():
'''Extract 1-d arrays.
All dimensions are treated the same, so we can test in 1 dim.
'''
assert np.all(extract_array(np.arange(4), (2, ), (0, ),
fill_value=-99) == np.array([-99, 0]))
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2, ), (i, )) ==
np.array([i - 1, i]))
assert np.all(extract_array(np.arange(4.), (2, ), (4, ),
fill_value=np.inf) == np.array([3, np.inf]))
def test_extract_array_1d_odd():
'''Extract 1-d arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only.
'''
assert np.all(extract_array(np.arange(4), (3,), (-1, ),
fill_value=-99) == np.array([-99, -99, 0]))
assert np.all(extract_array(np.arange(4), (3,), (0, ),
fill_value=-99) == np.array([-99, 0, 1]))
for i in [1, 2]:
assert np.all(extract_array(np.arange(4), (3,), (i, )) ==
np.array([i-1, i, i+1]))
assert np.all(extract_array(np.arange(4), (3,), (3, ),
fill_value=-99) == np.array([2, 3, -99]))
arrayin = np.arange(4.)
extracted = extract_array(arrayin, (3,), (4, ))
assert extracted[0] == 3
assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan
assert extracted.dtype == arrayin.dtype
def test_extract_array_1d():
"""In 1d, shape can be int instead of tuple"""
assert np.all(extract_array(np.arange(4), 3, (-1, ),
fill_value=-99) == np.array([-99, -99, 0]))
assert np.all(extract_array(np.arange(4), 3, -1,
fill_value=-99) == np.array([-99, -99, 0]))
def test_extract_array_float():
"""Integer positions are at the bin centers."""
for a in np.arange(2.51, 3.49, 0.1):
assert np.all(extract_array(np.arange(5), 3, a) ==
np.array([2, 3, 4]))
def test_extract_array_1d_trim():
'''Extract 1-d arrays.
All dimensions are treated the same, so we can test in 1 dim.
'''
assert np.all(extract_array(np.arange(4), (2, ), (0, ),
mode='trim') == np.array([0]))
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2, ), (i, ),
mode='trim') == np.array([i - 1, i]))
assert np.all(extract_array(np.arange(4.), (2, ), (4, ),
mode='trim') == np.array([3]))
@pytest.mark.parametrize('mode', ['partial', 'trim', 'strict'])
def test_extract_array_easy(mode):
"""
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array[3:8, 3:8] = small_test_array
extracted_array = extract_array(large_test_array, (5, 5), (5, 5),
mode=mode)
assert np.all(extracted_array == small_test_array)
def test_extract_array_return_pos():
'''Check that the return position is calculated correctly.
The result will differ by mode. All tests here are done in 1d because it's
easier to construct correct test cases.
'''
large_test_array = np.arange(5)
for i in np.arange(-1, 6):
extracted, new_pos = extract_array(large_test_array, 3, i,
mode='partial',
return_position=True)
assert new_pos == (1, )
# Now check an extraction shape with an even number of elements
for i, expected in zip([1.49, 1.51, 3], [0.49, 0.51, 1]):
extracted, new_pos = extract_array(large_test_array, (2,), (i,),
mode='strict', return_position=True)
assert new_pos == (expected, )
# For mode='trim' the answer actually depends on the position
for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):
extracted, new_pos = extract_array(large_test_array, (3,), (i,),
mode='trim', return_position=True)
assert new_pos == (expected, )
def test_add_array_odd_shape():
"""
Test add_array utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[3:8, 3:8] += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
def test_add_array_even_shape():
"""
Test add_array_2D utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((4, 4))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]
added_array = add_array(large_test_array, small_test_array, (0, 0))
assert np.all(added_array == large_test_array_ref)
@pytest.mark.parametrize(('position', 'subpixel_index'),
zip(test_positions, test_position_indices))
def test_subpixel_indices(position, subpixel_index):
"""
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values.
"""
assert np.all(subpixel_indices(position, subsampling) == subpixel_index)
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestBlockReduce:
def test_1d(self):
"""Test 1D array."""
data = np.arange(4)
expected = np.array([1, 5])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_1d_mean(self):
"""Test 1D array with func=np.mean."""
data = np.arange(4)
block_size = 2.
expected = block_reduce(data, block_size, func=np.sum) / block_size
result_mean = block_reduce(data, block_size, func=np.mean)
assert np.all(result_mean == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(4).reshape(2, 2)
expected = np.array([[6]])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_2d_mean(self):
"""Test 2D array with func=np.mean."""
data = np.arange(4).reshape(2, 2)
block_size = 2.
expected = (block_reduce(data, block_size, func=np.sum) /
block_size**2)
result = block_reduce(data, block_size, func=np.mean)
assert np.all(result == expected)
def test_2d_trim(self):
"""
Test trimming of 2D array when size is not perfectly divisible
by block_size.
"""
data1 = np.arange(15).reshape(5, 3)
result1 = block_reduce(data1, 2)
data2 = data1[0:4, 0:2]
result2 = block_reduce(data2, 2)
assert np.all(result1 == result2)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(16).reshape(4, 4)
result1 = block_reduce(data, 2)
result2 = block_reduce(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.ones((2, 2))
with pytest.raises(ValueError):
block_reduce(data, (2, 2, 2))
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestBlockReplicate:
def test_1d(self):
"""Test 1D array."""
data = np.arange(2)
expected = np.array([0, 0, 0.5, 0.5])
result = block_replicate(data, 2)
assert np.all(result == expected)
def test_1d_conserve_sum(self):
"""Test 1D array with conserve_sum=False."""
data = np.arange(2)
block_size = 2.
expected = block_replicate(data, block_size) * block_size
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(2).reshape(2, 1)
expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])
result = block_replicate(data, 2)
assert np.all(result == expected)
def test_2d_conserve_sum(self):
"""Test 2D array with conserve_sum=False."""
data = np.arange(6).reshape(2, 3)
block_size = 2.
expected = block_replicate(data, block_size) * block_size**2
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(4).reshape(2, 2)
result1 = block_replicate(data, 2)
result2 = block_replicate(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.arange(5)
with pytest.raises(ValueError):
block_replicate(data, (2, 2))
class TestCutout2D:
def setup_class(self):
self.data = np.arange(20.).reshape(5, 4)
self.position = SkyCoord('13h11m29.96s -01d19m18.7s', frame='icrs')
wcs = WCS(naxis=2)
rho = np.pi / 3.
scale = 0.05 / 3600.
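# rho is a 60 degree rotation and scale is 0.05 arcsec expressed in
# degrees per pixel; together they define the rotated CD matrix below.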
wcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)],
[scale*np.sin(rho), scale*np.cos(rho)]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [self.position.ra.to_value(u.deg),
self.position.dec.to_value(u.deg)]
wcs.wcs.crpix = [3, 3]
self.wcs = wcs
# add SIP
sipwcs = wcs.deepcopy()
sipwcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
sipwcs.wcs.set()
self.sipwcs = sipwcs
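# The A/B matrices above are SIP distortion polynomial coefficients;
# they add a mild nonlinear term so that tests such as
# test_crpix_maps_to_crval exercise all_pix2world, not just the
# linear wcs_pix2world transformation.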
def test_cutout(self):
sizes = [3, 3*u.pixel, (3, 3), (3*u.pixel, 3*u.pix), (3., 3*u.pixel),
(2.9, 3.3)]
for size in sizes:
position = (2.1, 1.9)
c = Cutout2D(self.data, position, size)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 10
assert c.origin_original == (1, 1)
assert c.origin_cutout == (0, 0)
assert c.input_position_original == position
assert_allclose(c.input_position_cutout, (1.1, 0.9))
assert c.position_original == (2., 2.)
assert c.position_cutout == (1., 1.)
assert c.center_original == (2., 2.)
assert c.center_cutout == (1., 1.)
assert c.bbox_original == ((1, 3), (1, 3))
assert c.bbox_cutout == ((0, 2), (0, 2))
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_length(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (1, 1, 1))
def test_size_units(self):
for size in [3 * u.cm, (3, 3 * u.K)]:
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), size)
def test_size_pixel(self):
"""
Check size in derived pixel units.
"""
size = 0.3*u.arcsec / (0.1*u.arcsec/u.pixel)
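# 0.3 arcsec at a plate scale of 0.1 arcsec/pixel reduces to 3 pixels,
# so this is equivalent to passing size=3*u.pixel.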
c = Cutout2D(self.data, (2, 2), size)
assert c.data.shape == (3, 3)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_angle(self):
c = Cutout2D(self.data, (2, 2), (0.1*u.arcsec), wcs=self.wcs)
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 3), slice(1, 3))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_size_angle_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))
def test_cutout_trim_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode='trim')
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_cutout_partial_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial')
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(1, 3), slice(1, 3))
def test_cutout_partial_overlap_fill_value(self):
fill_value = -99
c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial',
fill_value=fill_value)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.data[0, 0] == fill_value
def test_copy(self):
data = np.copy(self.data)
c = Cutout2D(data, (2, 3), (3, 3))
xy = (0, 0)
value = 100.
c.data[xy] = value
xy_orig = c.to_original_position(xy)
yx = xy_orig[::-1]
assert data[yx] == value
data = np.copy(self.data)
c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)
c2.data[xy] = value
assert data[yx] != value
def test_to_from_large(self):
position = (2, 2)
c = Cutout2D(self.data, position, (3, 3))
xy = (0, 0)
result = c.to_cutout_position(c.to_original_position(xy))
assert_allclose(result, xy)
def test_skycoord_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, self.position, (3, 3))
def test_skycoord(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)
skycoord_original = self.position.from_pixel(c.center_original[1],
c.center_original[0],
self.wcs)
skycoord_cutout = self.position.from_pixel(c.center_cutout[1],
c.center_cutout[0], c.wcs)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_skycoord_partial(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs,
mode='partial')
skycoord_original = self.position.from_pixel(c.center_original[1],
c.center_original[0],
self.wcs)
skycoord_cutout = self.position.from_pixel(c.center_cutout[1],
c.center_cutout[0], c.wcs)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_naxis_update(self):
xsize = 2
ysize = 3
c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)
assert c.wcs.array_shape == (ysize, xsize)
def test_crpix_maps_to_crval(self):
w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs,
mode='partial').wcs
pscale = np.sqrt(proj_plane_pixel_area(w))
assert_allclose(
w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
assert_allclose(
w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
|
535f336f243514a607f9cba29e22c48df7943b773ff6b3c1e123cf96af5b7ef7 | """
A module containing unit tests for the `bitmask` module.
Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
import warnings
import numpy as np
import pytest
from astropy.nddata import bitmask
MAX_INT_TYPE = np.maximum_sctype(np.int)
MAX_UINT_TYPE = np.maximum_sctype(np.uint)
MAX_UINT_FLAG = np.left_shift(
MAX_UINT_TYPE(1),
MAX_UINT_TYPE(np.iinfo(MAX_UINT_TYPE).bits - 1)
)
MAX_INT_FLAG = np.left_shift(
MAX_INT_TYPE(1),
MAX_INT_TYPE(np.iinfo(MAX_INT_TYPE).bits - 2)
)
SUPER_LARGE_FLAG = 1 << np.iinfo(MAX_UINT_TYPE).bits
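# The constants above select the most significant usable bit of the
# widest unsigned and signed integer types; SUPER_LARGE_FLAG is one bit
# wider still, so it is representable only as a Python int.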
EXTREME_TEST_DATA = np.array([
0, 1, 1 + 1 << 2, MAX_INT_FLAG, ~0, MAX_INT_TYPE(MAX_UINT_FLAG),
1 + MAX_INT_TYPE(MAX_UINT_FLAG)
], dtype=MAX_INT_TYPE)
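# Note that ``1 + 1 << 2`` evaluates to ``(1 + 1) << 2 == 8`` because
# ``+`` binds more tightly than ``<<``.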
@pytest.mark.parametrize('flag', [0, -1])
def test_nonpositive_not_a_bit_flag(flag):
assert not bitmask._is_bit_flag(n=flag)
@pytest.mark.parametrize('flag', [
1, MAX_UINT_FLAG, int(MAX_UINT_FLAG), SUPER_LARGE_FLAG
])
def test_is_bit_flag(flag):
assert bitmask._is_bit_flag(n=flag)
@pytest.mark.parametrize('number', [0, 1, MAX_UINT_FLAG, SUPER_LARGE_FLAG])
def test_is_int(number):
assert bitmask._is_int(number)
@pytest.mark.parametrize('number', ['1', True, 1.0])
def test_nonint_is_not_an_int(number):
assert not bitmask._is_int(number)
@pytest.mark.parametrize('flag,flip,expected', [
(3, None, 3),
(3, True, -4),
(3, False, 3),
([1, 2], False, 3),
([1, 2], True, -4)
])
def test_interpret_valid_int_bit_flags(flag, flip, expected):
assert(
bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip) == expected
)
@pytest.mark.parametrize('flag', [None, ' ', 'None', 'Indef'])
def test_interpret_none_bit_flags_as_None(flag):
assert bitmask.interpret_bit_flags(bit_flags=flag) is None
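# String bit flags follow a small grammar: integer flags joined by ','
# or '+', optionally wrapped in parentheses, with a leading '~'
# inverting the combined value.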
@pytest.mark.parametrize('flag,expected', [
('1', 1),
('~-1', ~(-1)),
('~1', ~1),
('1,2', 3),
('1+2', 3),
('(1,2)', 3),
('(1+2)', 3),
('~1,2', ~3),
('~1+2', ~3),
('~(1,2)', ~3),
('~(1+2)', ~3)
])
def test_interpret_valid_str_bit_flags(flag, expected):
assert(
bitmask.interpret_bit_flags(bit_flags=flag) == expected
)
@pytest.mark.parametrize('flag,flip', [
(None, True),
(' ', True),
('None', True),
('Indef', True),
(None, False),
(' ', False),
('None', False),
('Indef', False),
('1', True),
('1', False)
])
def test_interpret_None_or_str_and_flip_incompatibility(flag, flip):
with pytest.raises(TypeError):
bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip)
@pytest.mark.parametrize('flag', [True, 1.0, [1.0], object])
def test_interpret_wrong_flag_type(flag):
with pytest.raises(TypeError):
bitmask.interpret_bit_flags(bit_flags=flag)
@pytest.mark.parametrize('flag', ['SOMETHING', '1.0,2,3'])
def test_interpret_wrong_string_int_format(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_interpret_duplicate_flag_warning():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert bitmask.interpret_bit_flags([2, 4, 4]) == 6
assert len(w)
assert issubclass(w[-1].category, UserWarning)
assert "Duplicate" in str(w[-1].message)
@pytest.mark.parametrize('flag', [[1, 2, 3], '1, 2, 3'])
def test_interpret_non_flag(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_interpret_allow_single_value_str_nonflags():
assert bitmask.interpret_bit_flags(bit_flags=str(3)) == 3
@pytest.mark.parametrize('flag', [
'~',
'( )',
'(~1,2)',
'~(1,2',
'1,~2',
'1,(2,4)',
'1,2+4',
'1+4,2'
])
def test_interpret_bad_str_syntax(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_bitfield_must_be_integer_check():
with pytest.raises(TypeError):
bitmask.bitfield_to_boolean_mask(1.0, 1)
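# Each tuple below is (bitfield data, ignore_flags, flip_bits,
# good_mask_value, output dtype, expected mask values).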
@pytest.mark.parametrize('data,flags,flip,goodval,dtype,ref', [
(EXTREME_TEST_DATA, None, None, True, np.bool_,
EXTREME_TEST_DATA.size * [1]),
(EXTREME_TEST_DATA, None, None, False, np.bool_,
EXTREME_TEST_DATA.size * [0]),
(EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, True, np.bool_,
[1, 1, 0, 0, 0, 1, 1]),
(EXTREME_TEST_DATA, None, None, True, np.bool_,
EXTREME_TEST_DATA.size * [1]),
(EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, False, np.bool_,
[0, 0, 1, 1, 1, 0, 0]),
(EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], True, True, np.int8,
[1, 0, 1, 1, 0, 0, 0])
])
def test_bitfield_to_boolean_mask(data, flags, flip, goodval, dtype, ref):
mask = bitmask.bitfield_to_boolean_mask(
bitfield=data,
ignore_flags=flags,
flip_bits=flip,
good_mask_value=goodval,
dtype=dtype
)
assert(mask.dtype == dtype)
assert np.all(mask == ref)
|
34a8c089c054e3beacc5bb57a2dbdd88b10506e2fc68be527fd8560d355c7f45 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.nddata import FlagCollection
def test_init():
FlagCollection(shape=(1, 2, 3))
def test_init_noshape():
with pytest.raises(Exception) as exc:
FlagCollection()
assert exc.value.args[0] == ('FlagCollection should be initialized with '
'the shape of the data')
def test_init_notiterable():
with pytest.raises(Exception) as exc:
FlagCollection(shape=1.)
assert exc.value.args[0] == ('FlagCollection shape should be '
'an iterable object')
def test_setitem():
f = FlagCollection(shape=(1, 2, 3))
f['a'] = np.ones((1, 2, 3)).astype(float)
f['b'] = np.ones((1, 2, 3)).astype(int)
f['c'] = np.ones((1, 2, 3)).astype(bool)
f['d'] = np.ones((1, 2, 3)).astype(str)
@pytest.mark.parametrize(('value'), [1, 1., 'spam', [1, 2, 3], (1., 2., 3.)])
def test_setitem_invalid_type(value):
f = FlagCollection(shape=(1, 2, 3))
with pytest.raises(Exception) as exc:
f['a'] = value
assert exc.value.args[0] == 'flags should be given as a Numpy array'
def test_setitem_invalid_shape():
f = FlagCollection(shape=(1, 2, 3))
with pytest.raises(ValueError) as exc:
f['a'] = np.ones((3, 2, 1))
assert exc.value.args[0].startswith('flags array shape')
assert exc.value.args[0].endswith('does not match data shape (1, 2, 3)')
|
c0f2c171ea374c36664ed694c3016a9b99ccac4eb5c27289d1e2336e4d939484 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import textwrap
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy import units as u
from astropy.utils import NumpyRNGContext
from .test_nduncertainty import FakeUncertainty
class FakeNumpyArray:
"""
Class that has a few of the attributes of a numpy array.
These attributes are checked for by NDData.
"""
def __init__(self):
super().__init__()
def shape(self):
pass
def __getitem__(self):
pass
def __array__(self):
pass
@property
def dtype(self):
return 'fake'
class MinimalUncertainty:
"""
Define the minimum attributes acceptable as an uncertainty object.
"""
def __init__(self, value):
self._uncertainty = value
@property
def uncertainty_type(self):
return "totally and completely fake"
class BadNDDataSubclass(NDData):
def __init__(self, data, uncertainty=None, mask=None, wcs=None,
meta=None, unit=None):
self._data = data
self._uncertainty = uncertainty
self._mask = mask
self._wcs = wcs
self._unit = unit
self._meta = meta
# Setter tests
def test_uncertainty_setter():
nd = NDData([1, 2, 3])
good_uncertainty = MinimalUncertainty(5)
nd.uncertainty = good_uncertainty
assert nd.uncertainty is good_uncertainty
# Check the fake uncertainty (minimal does not work since it has no
# parent_nddata attribute from NDUncertainty)
nd.uncertainty = FakeUncertainty(5)
assert nd.uncertainty.parent_nddata is nd
# Check that it works if the uncertainty was set during init
nd = NDData(nd)
assert isinstance(nd.uncertainty, FakeUncertainty)
nd.uncertainty = 10
assert not isinstance(nd.uncertainty, FakeUncertainty)
assert nd.uncertainty.array == 10
def test_mask_setter():
# Since it just changes the _mask attribute everything should work
nd = NDData([1, 2, 3])
nd.mask = True
assert nd.mask
nd.mask = False
assert not nd.mask
# Check that it replaces a mask from init
nd = NDData(nd, mask=True)
assert nd.mask
nd.mask = False
assert not nd.mask
# Init tests
def test_nddata_empty():
with pytest.raises(TypeError):
NDData() # empty initializer should fail
def test_nddata_init_data_nonarray():
inp = [1, 2, 3]
nd = NDData(inp)
assert (np.array(inp) == nd.data).all()
def test_nddata_init_data_ndarray():
# random floats
with NumpyRNGContext(123):
nd = NDData(np.random.random((10, 10)))
assert nd.data.shape == (10, 10)
assert nd.data.size == 100
assert nd.data.dtype == np.dtype(float)
# specific integers
nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.data.size == 6
assert nd.data.dtype == np.dtype(int)
# Tests to ensure that creating a new NDData object copies by *reference*.
a = np.ones((10, 10))
nd_ref = NDData(a)
a[0, 0] = 0
assert nd_ref.data[0, 0] == 0
# Except we choose copy=True
a = np.ones((10, 10))
nd_ref = NDData(a, copy=True)
a[0, 0] = 0
assert nd_ref.data[0, 0] != 0
def test_nddata_init_data_maskedarray():
with NumpyRNGContext(456):
NDData(np.random.random((10, 10)),
mask=np.random.random((10, 10)) > 0.5)
# Another test case, this time with a randomly generated masked array
with NumpyRNGContext(12345):
a = np.random.randn(100)
marr = np.ma.masked_where(a > 0, a)
nd = NDData(marr)
# check that masks and data match
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# check that they are both by reference
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 123456789
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# or not if we choose copy=True
nd = NDData(marr, copy=True)
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 0
assert nd.mask[10] != marr.mask[10]
assert nd.data[11] != marr.data[11]
@pytest.mark.parametrize('data', [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
# Test an array and a scalar because a scalar Quantity does not always
# behave the same way as an array.
quantity = data * u.adu
ndd = NDData(quantity)
assert ndd.unit == quantity.unit
assert_array_equal(ndd.data, np.array(quantity.value))
if ndd.data.size > 1:
# check that if it is an array it is not copied
quantity.value[1] = 100
assert ndd.data[1] == quantity.value[1]
# or is copied if we choose copy=True
ndd = NDData(quantity, copy=True)
quantity.value[1] = 5
assert ndd.data[1] != quantity.value[1]
def test_nddata_init_data_masked_quantity():
a = np.array([2, 3])
q = a * u.m
m = False
mq = np.ma.array(q, mask=m)
nd = NDData(mq)
assert_array_equal(nd.data, a)
# This test failed before the change in the nddata init because the
# masked array's data (which was in fact a quantity) was saved directly.
assert nd.unit == u.m
assert not isinstance(nd.data, u.Quantity)
np.testing.assert_array_equal(nd.mask, np.array(m))
def test_nddata_init_data_nddata():
nd1 = NDData(np.array([1]))
nd2 = NDData(nd1)
assert nd2.wcs == nd1.wcs
assert nd2.uncertainty == nd1.uncertainty
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
# Check that it is copied by reference
nd1 = NDData(np.ones((5, 5)))
nd2 = NDData(nd1)
assert nd1.data is nd2.data
# Check that it is really copied if copy=True
nd2 = NDData(nd1, copy=True)
nd1.data[2, 3] = 10
assert nd1.data[2, 3] != nd2.data[2, 3]
# Now let's see what happens if we have all explicitly set
nd1 = NDData(np.array([1]), mask=False, uncertainty=StdDevUncertainty(10), unit=u.s,
meta={'dest': 'mordor'}, wcs=10)
nd2 = NDData(nd1)
assert nd2.data is nd1.data
assert nd2.wcs == nd1.wcs
assert nd2.uncertainty.array == nd1.uncertainty.array
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
# now what happens if we overwrite them all too
nd3 = NDData(nd1, mask=True, uncertainty=StdDevUncertainty(200), unit=u.km,
meta={'observer': 'ME'}, wcs=4)
assert nd3.data is nd1.data
assert nd3.wcs != nd1.wcs
assert nd3.uncertainty.array != nd1.uncertainty.array
assert nd3.mask != nd1.mask
assert nd3.unit != nd1.unit
assert nd3.meta != nd1.meta
def test_nddata_init_data_nddata_subclass():
uncert = StdDevUncertainty(3)
# There might be some incompatible subclasses of NDData around.
bnd = BadNDDataSubclass(False, True, 3, 2, 'gollum', 100)
# Before changing the NDData init this would not have raised an error but
# would have led to a compromised nddata instance
with pytest.raises(TypeError):
NDData(bnd)
# but if it has no actual incompatible attributes it passes
bnd_good = BadNDDataSubclass(np.array([1, 2]), uncert, 3, 2,
{'enemy': 'black knight'}, u.km)
nd = NDData(bnd_good)
assert nd.unit == bnd_good.unit
assert nd.meta == bnd_good.meta
assert nd.uncertainty == bnd_good.uncertainty
assert nd.mask == bnd_good.mask
assert nd.wcs == bnd_good.wcs
assert nd.data is bnd_good.data
def test_nddata_init_data_fail():
# First one is sliceable but has no shape, so should fail.
with pytest.raises(TypeError):
NDData({'a': 'dict'})
# This has a shape but is not sliceable
class Shape:
def __init__(self):
self.shape = 5
def __repr__(self):
return '7'
with pytest.raises(TypeError):
NDData(Shape())
def test_nddata_init_data_fakes():
ndd1 = NDData(FakeNumpyArray())
# First make sure that NDData isn't converting its data to a numpy array.
assert isinstance(ndd1.data, FakeNumpyArray)
# Make a new NDData initialized from an NDData
ndd2 = NDData(ndd1)
# Check that the data wasn't converted to numpy
assert isinstance(ndd2.data, FakeNumpyArray)
# Specific parameters
def test_param_uncertainty():
u = StdDevUncertainty(array=np.ones((5, 5)))
d = NDData(np.ones((5, 5)), uncertainty=u)
# Test that the parent_nddata is set.
assert d.uncertainty.parent_nddata is d
# Test conflicting uncertainties (other NDData)
u2 = StdDevUncertainty(array=np.ones((5, 5))*2)
d2 = NDData(d, uncertainty=u2)
assert d2.uncertainty is u2
assert d2.uncertainty.parent_nddata is d2
def test_param_wcs():
# Since everything is allowed we only need to test something
nd = NDData([1], wcs=3)
assert nd.wcs == 3
# Test conflicting wcs (other NDData)
nd2 = NDData(nd, wcs=2)
assert nd2.wcs == 2
def test_param_meta():
# Everything dict-like is allowed as meta; anything else raises TypeError
with pytest.raises(TypeError):
NDData([1], meta=3)
nd = NDData([1, 2, 3], meta={})
assert len(nd.meta) == 0
nd = NDData([1, 2, 3])
assert isinstance(nd.meta, OrderedDict)
assert len(nd.meta) == 0
# Test conflicting meta (other NDData)
nd2 = NDData(nd, meta={'image': 'sun'})
assert len(nd2.meta) == 1
nd3 = NDData(nd2, meta={'image': 'moon'})
assert len(nd3.meta) == 1
assert nd3.meta['image'] == 'moon'
def test_param_mask():
# Since everything is allowed we only need to test something
nd = NDData([1], mask=False)
assert not nd.mask
# Test conflicting mask (other NDData)
nd2 = NDData(nd, mask=True)
assert nd2.mask
# (masked array)
nd3 = NDData(np.ma.array([1], mask=False), mask=True)
assert nd3.mask
# (masked quantity)
mq = np.ma.array(np.array([2, 3])*u.m, mask=False)
nd4 = NDData(mq, mask=True)
assert nd4.mask
def test_param_unit():
with pytest.raises(ValueError):
NDData(np.ones((5, 5)), unit="NotAValidUnit")
NDData([1, 2, 3], unit='meter')
# Test conflicting units (quantity as data)
q = np.array([1, 2, 3]) * u.m
nd = NDData(q, unit='cm')
assert nd.unit != q.unit
assert nd.unit == u.cm
# (masked quantity)
mq = np.ma.array(np.array([2, 3])*u.m, mask=False)
nd2 = NDData(mq, unit=u.s)
assert nd2.unit == u.s
# (another NDData as data)
nd3 = NDData(nd, unit='km')
assert nd3.unit == u.km
def test_pickle_nddata_with_uncertainty():
ndd = NDData(np.ones(3),
uncertainty=StdDevUncertainty(np.ones(5), unit=u.m),
unit=u.m)
ndd_dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(ndd_dumped)
assert type(ndd_restored.uncertainty) is StdDevUncertainty
assert ndd_restored.uncertainty.parent_nddata is ndd_restored
assert ndd_restored.uncertainty.unit == u.m
def test_pickle_uncertainty_only():
ndd = NDData(np.ones(3),
uncertainty=StdDevUncertainty(np.ones(5), unit=u.m),
unit=u.m)
uncertainty_dumped = pickle.dumps(ndd.uncertainty)
uncertainty_restored = pickle.loads(uncertainty_dumped)
np.testing.assert_array_equal(ndd.uncertainty.array,
uncertainty_restored.array)
assert ndd.uncertainty.unit == uncertainty_restored.unit
# Even though it has a parent there is no one that references the parent
# after unpickling so the weakref "dies" immediately after unpickling
# finishes.
assert uncertainty_restored.parent_nddata is None
def test_pickle_nddata_without_uncertainty():
ndd = NDData(np.ones(3), unit=u.m)
dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(dumped)
np.testing.assert_array_equal(ndd.data, ndd_restored.data)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaNDData(MetaBaseTest):
test_class = NDData
args = np.array([[1.]])
# Representation tests
def test_nddata_str():
arr1d = NDData(np.array([1, 2, 3]))
assert str(arr1d) == '[1 2 3]'
arr2d = NDData(np.array([[1, 2], [3, 4]]))
assert str(arr2d) == textwrap.dedent("""
[[1 2]
[3 4]]"""[1:])
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert str(arr3d) == textwrap.dedent("""
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]"""[1:])
def test_nddata_repr():
arr1d = NDData(np.array([1, 2, 3]))
assert repr(arr1d) == 'NDData([1, 2, 3])'
arr2d = NDData(np.array([[1, 2], [3, 4]]))
assert repr(arr2d) == textwrap.dedent("""
NDData([[1, 2],
[3, 4]])"""[1:])
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert repr(arr3d) == textwrap.dedent("""
NDData([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])"""[1:])
# Not supported features
def test_slicing_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd[0]
def test_arithmetic_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd + ndd
|
818769a6809574056c03eb34f174818e9593ea066a95a809c1cc14cc75e30da3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains tests of a class equivalent to pre-1.0 NDData.
import pytest
import numpy as np
from astropy.nddata.nddata import NDData
from astropy.nddata.compat import NDDataArray
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy import units as u
NDDATA_ATTRIBUTES = ['mask', 'flags', 'uncertainty', 'unit', 'shape', 'size',
'dtype', 'ndim', 'wcs', 'convert_unit_to']
def test_nddataarray_has_attributes_of_old_nddata():
ndd = NDDataArray([1, 2, 3])
for attr in NDDATA_ATTRIBUTES:
assert hasattr(ndd, attr)
def test_nddata_simple():
nd = NDDataArray(np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
def test_nddata_parameters():
# Test for issue 4620
nd = NDDataArray(data=np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
# Change order; `data` has to be given explicitly here
nd = NDDataArray(meta={}, data=np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
# Pass uncertainty as second implicit argument
data = np.zeros((10, 10))
uncertainty = StdDevUncertainty(0.1 + np.zeros_like(data))
nd = NDDataArray(data, uncertainty)
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
assert nd.uncertainty == uncertainty
def test_nddata_conversion():
nd = NDDataArray(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.size == 6
assert nd.dtype == np.dtype(int)
@pytest.mark.parametrize('flags_in', [
np.array([True, False]),
np.array([1, 0]),
[True, False],
[1, 0],
np.array(['a', 'b']),
['a', 'b']])
def test_nddata_flags_init_without_np_array(flags_in):
ndd = NDDataArray([1, 1], flags=flags_in)
assert (ndd.flags == flags_in).all()
@pytest.mark.parametrize(('shape'), [(10,), (5, 5), (3, 10, 10)])
def test_nddata_flags_invalid_shape(shape):
with pytest.raises(ValueError) as exc:
NDDataArray(np.zeros((10, 10)), flags=np.ones(shape))
assert exc.value.args[0] == 'dimensions of flags do not match data'
def test_convert_unit_to():
# convert_unit_to should return a copy of its input
d = NDDataArray(np.ones((5, 5)))
d.unit = 'km'
d.uncertainty = StdDevUncertainty(0.1 + np.zeros_like(d))
# workaround because zeros_like does not support dtype arg until v1.6
# and NDData accepts only bool ndarray as mask
tmp = np.zeros_like(d.data)
d.mask = np.array(tmp, dtype=bool)
d1 = d.convert_unit_to('m')
assert np.all(d1.data == np.array(1000.0))
assert np.all(d1.uncertainty.array == 1000.0 * d.uncertainty.array)
assert d1.unit == u.m
# changing the output mask should not change the original
d1.mask[0, 0] = True
assert d.mask[0, 0] != d1.mask[0, 0]
d.flags = np.zeros_like(d.data)
d1 = d.convert_unit_to('m')
# check that subclasses can require wcs and/or unit to be present and use
# _arithmetic and convert_unit_to
class SubNDData(NDDataArray):
"""
Subclass for test initialization of subclasses in NDData._arithmetic and
NDData.convert_unit_to
"""
def __init__(self, *arg, **kwd):
super().__init__(*arg, **kwd)
if self.unit is None:
raise ValueError("Unit for subclass must be specified")
if self.wcs is None:
raise ValueError("WCS for subclass must be specified")
def test_init_of_subclass_in_convert_unit_to():
data = np.ones([10, 10])
arr1 = SubNDData(data, unit='m', wcs=5)
result = arr1.convert_unit_to('km')
np.testing.assert_array_equal(arr1.data, 1000 * result.data)
# Test for issue #4129:
def test_nddataarray_from_nddataarray():
ndd1 = NDDataArray([1., 4., 9.],
uncertainty=StdDevUncertainty([1., 2., 3.]),
flags=[0, 1, 0])
ndd2 = NDDataArray(ndd1)
# Test that the 2 instances point to the same objects and aren't just
# equal; this is explicitly documented for the main data array and we
# probably want to catch any future change in behavior for the other
# attributes too and ensure they are intentional.
assert ndd2.data is ndd1.data
assert ndd2.uncertainty is ndd1.uncertainty
assert ndd2.flags is ndd1.flags
assert ndd2.meta == ndd1.meta
# Test for issue #4137:
def test_nddataarray_from_nddata():
ndd1 = NDData([1., 4., 9.],
uncertainty=StdDevUncertainty([1., 2., 3.]))
ndd2 = NDDataArray(ndd1)
assert ndd2.data is ndd1.data
assert ndd2.uncertainty is ndd1.uncertainty
assert ndd2.meta == ndd1.meta
|
95dc4d43d871b1e8895985b3b0e1bb9723eeeba0714e0a213b95d24f8e9a5487 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import pytest
import numpy as np
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy import units as u
from astropy.nddata.nddata import NDData
from astropy.nddata.decorators import support_nddata
class CCDData(NDData):
pass
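# support_nddata unwraps an NDData passed as the first argument: its
# ``data`` goes to the wrapped function's ``data`` parameter and matching
# attributes (wcs, unit, mask, ...) are forwarded as keyword arguments,
# so the tests below can pass either plain arrays or NDData instances.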
@support_nddata
def wrapped_function_1(data, wcs=None, unit=None):
return data, wcs, unit
def test_pass_numpy():
data_in = np.array([1, 2, 3])
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in)
assert data_out is data_in
assert wcs_out is None
assert unit_out is None
def test_pass_all_separate():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in, wcs=wcs_in, unit=unit_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata_and_explicit():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
unit_in_alt = u.mJy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
with catch_warnings() as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in_alt
assert len(w) == 1
assert str(w[0].message) == ("Property unit has been passed explicitly and as "
"an NDData property, using explicitly specified value")
def test_pass_nddata_ignored():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0])
with catch_warnings() as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
assert len(w) == 1
assert str(w[0].message) == ("The following attributes were set on the data "
"object, but will be ignored by the function: mask")
def test_incorrect_first_argument():
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_2(something, wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_3(something, data, wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_4(wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
def test_wrap_function_no_kwargs():
@support_nddata
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in
def test_wrap_function_repack_valid():
@support_nddata(repack=True, returns=['data'])
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
nddata_out = wrapped_function_5(nddata_in, [1, 2, 3])
assert isinstance(nddata_out, NDData)
assert nddata_out.data is data_in
def test_wrap_function_accepts():
class MyData(NDData):
pass
@support_nddata(accepts=MyData)
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
mydata_in = MyData(data_in)
assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in
with pytest.raises(TypeError) as exc:
wrapped_function_5(nddata_in, [1, 2, 3])
assert exc.value.args[0] == "Only NDData sub-classes that inherit from MyData can be used by this function"
def test_wrap_preserve_signature_docstring():
@support_nddata
def wrapped_function_6(data, wcs=None, unit=None):
"""
An awesome function
"""
pass
if wrapped_function_6.__doc__ is not None:
assert wrapped_function_6.__doc__.strip() == "An awesome function"
signature = inspect.signature(wrapped_function_6)
assert str(signature) == "(data, wcs=None, unit=None)"
def test_setup_failures1():
# repack but no returns
with pytest.raises(ValueError):
support_nddata(repack=True)
def test_setup_failures2():
# returns but no repack
with pytest.raises(ValueError):
support_nddata(returns=['data'])
def test_setup_failures9():
# keeps but no repack
with pytest.raises(ValueError):
support_nddata(keeps=['unit'])
def test_setup_failures3():
# same attribute in keeps and returns
with pytest.raises(ValueError):
support_nddata(repack=True, keeps=['mask'], returns=['data', 'mask'])
def test_setup_failures4():
# function accepts *args
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures10():
# function accepts **kwargs
with pytest.raises(ValueError):
@support_nddata
def test(data, **kwargs):
pass
def test_setup_failures5():
# function accepts *args (or **kwargs)
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures6():
# First argument is not data
with pytest.raises(ValueError):
@support_nddata
def test(img):
pass
def test_setup_failures7():
# accepts CCDData but was given just an NDData
with pytest.raises(TypeError):
@support_nddata(accepts=CCDData)
def test(data):
pass
test(NDData(np.ones((3, 3))))
def test_setup_failures8():
    # function returns a different number of arguments than specified. Using
    # NDData here so we don't get into trouble when creating a CCDData without
    # a unit!
with pytest.raises(ValueError):
@support_nddata(repack=True, returns=['data', 'mask'])
def test(data):
return 10
test(NDData(np.ones((3, 3)))) # do NOT use CCDData here.
def test_setup_failures11():
# function accepts no arguments
with pytest.raises(ValueError):
@support_nddata
def test():
pass
def test_setup_numpyarray_default():
# It should be possible (even if it's not advisable to use mutable
    # defaults) to have a numpy array as a default value.
@support_nddata
def func(data, wcs=np.array([1, 2, 3])):
return wcs
def test_still_accepts_other_input():
@support_nddata(repack=True, returns=['data'])
def test(data):
return data
assert isinstance(test(NDData(np.ones((3, 3)))), NDData)
assert isinstance(test(10), int)
assert isinstance(test([1, 2, 3]), list)
def test_accepting_property_normal():
# Accepts a mask attribute and takes it from the input
@support_nddata
def test(data, mask=None):
return mask
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with catch_warnings(AstropyUserWarning) as w:
assert test(ndd, mask=10) == 10
assert len(w) == 1
def test_parameter_default_identical_to_explicit_passed_argument():
# If the default is identical to the explicitly passed argument this
# should still raise a Warning and use the explicit one.
@support_nddata
def func(data, wcs=[1, 2, 3]):
return wcs
with catch_warnings(AstropyUserWarning) as w:
assert func(NDData(1, wcs=[1, 2]), [1, 2, 3]) == [1, 2, 3]
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert func(NDData(1, wcs=[1, 2])) == [1, 2]
assert len(w) == 0
def test_accepting_property_notexist():
    # Accepts a flags attribute but NDData doesn't have one
@support_nddata
def test(data, flags=10):
return flags
ndd = NDData(np.ones((3, 3)))
test(ndd)
def test_accepting_property_translated():
    # Accepts a masked keyword and we want the mask attribute passed into it!
@support_nddata(mask='masked')
def test(data, masked=None):
return masked
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with catch_warnings(AstropyUserWarning) as w:
assert test(ndd, masked=10) == 10
assert len(w) == 1
def test_accepting_property_meta_empty():
    # Meta is always set (an OrderedDict), so it is special-cased: it is
    # ignored when it is empty, even though it is not None
@support_nddata
def test(data, meta=None):
return meta
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._meta = {'a': 10}
assert test(ndd) == {'a': 10}
|
ca815c59f2e316fb5ee9ea9c57ed13e6182a96f5dbe4bfa4fecfa850f7f27a16 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.nddata.nduncertainty import (StdDevUncertainty,
VarianceUncertainty,
InverseVariance,
NDUncertainty,
IncompatibleUncertaintiesException,
MissingDataAssociationException,
UnknownUncertainty)
from astropy.nddata.nddata import NDData
from astropy.nddata.compat import NDDataArray
from astropy.nddata.ccddata import CCDData
from astropy import units as u
# Regarding setter tests:
# No need to test setters since the uncertainty is considered immutable after
# creation, except for the parent_nddata attribute, and that accepts just
# about everything.
# Additionally, they should be covered by NDData and NDArithmeticMixin, which
# rely on it.
# Regarding propagate, _convert_uncert, _propagate_* tests:
# They should be covered by NDArithmeticMixin since there is generally no need
# to test them without this mixin.
# Regarding __getitem__ tests:
# Should be covered by NDSlicingMixin.
# Regarding StdDevUncertainty tests:
# This subclass only overrides the methods for propagation, so those should
# likewise be covered in NDArithmeticMixin.
# Not really fake, but the minimum an uncertainty has to override in order
# not to be abstract.
class FakeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return 'fake'
def _data_unit_to_uncertainty_unit(self, value):
return None
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
# Test the fake (the real uncertainty classes are added too since they
# should behave identically): the list of classes used for parametrization
# in the tests below.
uncertainty_types_to_be_tested = [
FakeUncertainty,
StdDevUncertainty,
VarianceUncertainty,
InverseVariance,
UnknownUncertainty
]
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_list(UncertClass):
fake_uncert = UncertClass([1, 2, 3])
assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
# Copy makes no difference since casting a list to an np.ndarray always
# makes a copy.
# But let's give the uncertainty a unit too
fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_ndarray(UncertClass):
uncert = np.arange(100).reshape(10, 10)
fake_uncert = UncertClass(uncert)
# Numpy Arrays are copied by default
assert_array_equal(fake_uncert.array, uncert)
assert fake_uncert.array is not uncert
# Now try it without copy
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array is uncert
# let's provide a unit
fake_uncert = UncertClass(uncert, unit=u.adu)
assert_array_equal(fake_uncert.array, uncert)
assert fake_uncert.array is not uncert
assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_quantity(UncertClass):
uncert = np.arange(10).reshape(2, 5) * u.adu
fake_uncert = UncertClass(uncert)
# Numpy Arrays are copied by default
assert_array_equal(fake_uncert.array, uncert.value)
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.adu
# Try without copy (should not work, quantity.value always returns a copy)
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.adu
# Now try with an explicit unit parameter too
fake_uncert = UncertClass(uncert, unit=u.m)
assert_array_equal(fake_uncert.array, uncert.value) # No conversion done
assert fake_uncert.array is not uncert.value
assert fake_uncert.unit is u.m # It took the explicit one
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_fake(UncertClass):
uncert = np.arange(5).reshape(5, 1)
fake_uncert1 = UncertClass(uncert)
fake_uncert2 = UncertClass(fake_uncert1)
assert_array_equal(fake_uncert2.array, uncert)
assert fake_uncert2.array is not uncert
# Without making copies
fake_uncert1 = UncertClass(uncert, copy=False)
fake_uncert2 = UncertClass(fake_uncert1, copy=False)
assert_array_equal(fake_uncert2.array, fake_uncert1.array)
assert fake_uncert2.array is fake_uncert1.array
# With a unit
uncert = np.arange(5).reshape(5, 1) * u.adu
fake_uncert1 = UncertClass(uncert)
fake_uncert2 = UncertClass(fake_uncert1)
assert_array_equal(fake_uncert2.array, uncert.value)
assert fake_uncert2.array is not uncert.value
assert fake_uncert2.unit is u.adu
# With a unit and an explicit unit-parameter
fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm)
assert_array_equal(fake_uncert2.array, uncert.value)
assert fake_uncert2.array is not uncert.value
assert fake_uncert2.unit is u.cm
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_init_fake_with_somethingElse(UncertClass):
# What about a dict?
uncert = {'rdnoise': 2.9, 'gain': 0.6}
fake_uncert = UncertClass(uncert)
assert fake_uncert.array == uncert
# We can pass a unit too but since we cannot do uncertainty propagation
# the interpretation is up to the user
fake_uncert = UncertClass(uncert, unit=u.s)
assert fake_uncert.array == uncert
assert fake_uncert.unit is u.s
# So, now check what happens if copy is False
fake_uncert = UncertClass(uncert, copy=False)
assert fake_uncert.array == uncert
assert id(fake_uncert) != id(uncert)
# dicts cannot be referenced without copy
# TODO : Find something that can be referenced without copy :-)
def test_init_fake_with_StdDevUncertainty():
    # Different uncertainty classes are not directly convertible, so this
    # should fail
uncert = np.arange(5).reshape(5, 1)
std_uncert = StdDevUncertainty(uncert)
with pytest.raises(IncompatibleUncertaintiesException):
FakeUncertainty(std_uncert)
# Ok try it the other way around
fake_uncert = FakeUncertainty(uncert)
with pytest.raises(IncompatibleUncertaintiesException):
StdDevUncertainty(fake_uncert)
def test_uncertainty_type():
fake_uncert = FakeUncertainty([10, 2])
assert fake_uncert.uncertainty_type == 'fake'
std_uncert = StdDevUncertainty([10, 2])
assert std_uncert.uncertainty_type == 'std'
var_uncert = VarianceUncertainty([10, 2])
assert var_uncert.uncertainty_type == 'var'
ivar_uncert = InverseVariance([10, 2])
assert ivar_uncert.uncertainty_type == 'ivar'
def test_uncertainty_correlated():
fake_uncert = FakeUncertainty([10, 2])
assert not fake_uncert.supports_correlated
std_uncert = StdDevUncertainty([10, 2])
assert std_uncert.supports_correlated
def test_for_leak_with_uncertainty():
# Regression test for memory leak because of cyclic references between
# NDData and uncertainty
from collections import defaultdict
from gc import get_objects
def test_leak(func, specific_objects=None):
"""Function based on gc.get_objects to determine if any object or
a specific object leaks.
It requires a function to be given and if any objects survive the
function scope it's considered a leak (so don't return anything).
"""
before = defaultdict(int)
for i in get_objects():
before[type(i)] += 1
func()
after = defaultdict(int)
for i in get_objects():
after[type(i)] += 1
if specific_objects is None:
assert all(after[k] - before[k] == 0 for k in after)
else:
assert after[specific_objects] - before[specific_objects] == 0
def non_leaker_nddata():
# Without uncertainty there is no reason to assume that there is a
# memory leak but test it nevertheless.
NDData(np.ones(100))
def leaker_nddata():
# With uncertainty there was a memory leak!
NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))
test_leak(non_leaker_nddata, NDData)
test_leak(leaker_nddata, NDData)
# Same for NDDataArray:
from astropy.nddata.compat import NDDataArray
def non_leaker_nddataarray():
NDDataArray(np.ones(100))
def leaker_nddataarray():
NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))
test_leak(non_leaker_nddataarray, NDDataArray)
test_leak(leaker_nddataarray, NDDataArray)
def test_for_stolen_uncertainty():
# Sharing uncertainties should not overwrite the parent_nddata attribute
ndd1 = NDData(1, uncertainty=1)
ndd2 = NDData(2, uncertainty=ndd1.uncertainty)
# uncertainty.parent_nddata.data should be the original data!
assert ndd1.uncertainty.parent_nddata.data == ndd1.data
assert ndd2.uncertainty.parent_nddata.data == ndd2.data
def test_stddevuncertainty_pickle():
uncertainty = StdDevUncertainty(np.ones(3), unit=u.m)
uncertainty_restored = pickle.loads(pickle.dumps(uncertainty))
np.testing.assert_array_equal(uncertainty.array, uncertainty_restored.array)
assert uncertainty.unit == uncertainty_restored.unit
with pytest.raises(MissingDataAssociationException):
uncertainty_restored.parent_nddata
@pytest.mark.parametrize(('UncertClass'), uncertainty_types_to_be_tested)
def test_quantity(UncertClass):
fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
assert isinstance(fake_uncert.quantity, u.Quantity)
assert fake_uncert.quantity.unit.is_equivalent(u.adu)
fake_uncert_nounit = UncertClass([1, 2, 3])
assert isinstance(fake_uncert_nounit.quantity, u.Quantity)
assert fake_uncert_nounit.quantity.unit.is_equivalent(u.dimensionless_unscaled)
@pytest.mark.parametrize(('UncertClass'),
[VarianceUncertainty,
StdDevUncertainty,
InverseVariance])
def test_setting_uncertainty_unit_results_in_unit_object(UncertClass):
v = UncertClass([1, 1])
v.unit = 'electron'
assert isinstance(v.unit, u.UnitBase)
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass'),
[VarianceUncertainty,
StdDevUncertainty,
InverseVariance])
def test_changing_unit_to_value_inconsistent_with_parent_fails(NDClass,
UncertClass):
ndd1 = NDClass(1, unit='adu')
v = UncertClass(1)
# Sets the uncertainty unit to whatever makes sense with this data.
ndd1.uncertainty = v
with pytest.raises(u.UnitConversionError):
# Nothing special about 15 except no one would ever use that unit
v.unit = ndd1.unit ** 15
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass, expected_unit'),
[(VarianceUncertainty, u.adu ** 2),
(StdDevUncertainty, u.adu),
(InverseVariance, 1 / u.adu ** 2)])
def test_assigning_uncertainty_to_parent_gives_correct_unit(NDClass,
UncertClass,
expected_unit):
# Does assigning a unitless uncertainty to an NDData result in the
# expected unit?
ndd = NDClass([1, 1], unit=u.adu)
v = UncertClass([1, 1])
ndd.uncertainty = v
assert v.unit == expected_unit
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass, expected_unit'),
[(VarianceUncertainty, u.adu ** 2),
(StdDevUncertainty, u.adu),
(InverseVariance, 1 / u.adu ** 2)])
def test_assigning_uncertainty_with_unit_to_parent_with_unit(NDClass,
UncertClass,
expected_unit):
# Does assigning an uncertainty with an appropriate unit to an NDData
# with a unit work?
ndd = NDClass([1, 1], unit=u.adu)
v = UncertClass([1, 1], unit=expected_unit)
ndd.uncertainty = v
assert v.unit == expected_unit
@pytest.mark.parametrize('NDClass', [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(('UncertClass'),
[(VarianceUncertainty),
(StdDevUncertainty),
(InverseVariance)])
def test_assigning_uncertainty_with_bad_unit_to_parent_fails(NDClass,
UncertClass):
    # Assigning an uncertainty with a non-matching unit to an NDData with a
    # unit should fail.
ndd = NDClass([1, 1], unit=u.adu)
# Set the unit to something inconsistent with ndd's unit
v = UncertClass([1, 1], unit=u.second)
with pytest.raises(u.UnitConversionError):
ndd.uncertainty = v
|
af6dfbeaec97607dfd62836130ca28f0472f1f2653d77a324a12f8761ecaa24f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.nddata import NDData, NDSlicingMixin
from astropy.nddata.nduncertainty import NDUncertainty, StdDevUncertainty
from astropy import units as u
# Just add the Mixin to NDData
# TODO: Make this use NDDataRef instead!
class NDDataSliceable(NDSlicingMixin, NDData):
pass
# Just some uncertainty (following the StdDevUncertainty implementation of
# storing the uncertainty in a property 'array') with slicing.
class SomeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return 'fake'
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
def test_slicing_only_data():
data = np.arange(10)
nd = NDDataSliceable(data)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
def test_slicing_data_scalar_fail():
data = np.array(10)
nd = NDDataSliceable(data)
with pytest.raises(TypeError): # as exc
nd[:]
# assert exc.value.args[0] == 'Scalars cannot be sliced.'
def test_slicing_1ddata_ndslice():
data = np.array([10, 20])
nd = NDDataSliceable(data)
    # Standard numpy error here:
with pytest.raises(IndexError):
nd[:, :]
@pytest.mark.parametrize('prop_name', ['mask', 'wcs', 'uncertainty'])
def test_slicing_1dmask_ndslice(prop_name):
    # Data is 2d but the mask is only 1d, so the IndexError raised when
    # slicing the mask should propagate to the user.
data = np.ones((3, 3))
kwarg = {prop_name: np.ones(3)}
nd = NDDataSliceable(data, **kwarg)
    # Standard numpy error here:
with pytest.raises(IndexError):
nd[:, :]
def test_slicing_all_npndarray_1d():
data = np.arange(10)
mask = data > 3
uncertainty = StdDevUncertainty(np.linspace(10, 20, 10))
wcs = np.linspace(1, 1000, 10)
# Just to have them too
unit = u.s
meta = {'observer': 'Brian'}
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs,
unit=unit, meta=meta)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5].array, nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
assert unit is nd2.unit
    assert meta == nd2.meta
def test_slicing_all_npndarray_nd():
# See what happens for multidimensional properties
data = np.arange(1000).reshape(10, 10, 10)
mask = data > 3
uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10)
wcs = np.linspace(1, 1000, 1000).reshape(10, 10, 10)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
# Slice only 1D
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
# Slice 3D
nd2 = nd[2:5, :, 4:7]
assert_array_equal(data[2:5, :, 4:7], nd2.data)
assert_array_equal(mask[2:5, :, 4:7], nd2.mask)
assert_array_equal(uncertainty[2:5, :, 4:7], nd2.uncertainty.array)
assert_array_equal(wcs[2:5, :, 4:7], nd2.wcs)
def test_slicing_all_npndarray_shape_diff():
data = np.arange(10)
mask = (data > 3)[0:9]
uncertainty = np.linspace(10, 20, 15)
wcs = np.linspace(1, 1000, 12)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
# All are sliced even if the shapes differ (no Info)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
def test_slicing_all_something_wrong():
data = np.arange(10)
mask = [False]*10
uncertainty = {'rdnoise': 2.9, 'gain': 1.4}
wcs = 145 * u.degree
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
# Sliced properties:
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
    # Attributes that are not sliced (an info message is emitted nevertheless)
    assert uncertainty == nd2.uncertainty.array
assert_array_equal(wcs, nd2.wcs)
def test_boolean_slicing():
data = np.arange(10)
mask = data.copy()
uncertainty = StdDevUncertainty(data.copy())
wcs = data.copy()
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
assert_array_equal(data[3:8], nd2.data)
assert_array_equal(mask[3:8], nd2.mask)
assert_array_equal(wcs[3:8], nd2.wcs)
assert_array_equal(uncertainty.array[3:8], nd2.uncertainty.array)
|
6acc4394e701efacfc5c8eb36b7788f217f0714e5403ae27e0994a289dc965bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from astropy.nddata.nduncertainty import (StdDevUncertainty, VarianceUncertainty,
InverseVariance,
UnknownUncertainty,
IncompatibleUncertaintiesException)
from astropy.nddata import NDDataRef
from astropy.nddata.nddata import NDData
from astropy.units import UnitsError, Quantity
from astropy import units as u
# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataArithmetic = NDDataRef
class StdDevUncertaintyUncorrelated(StdDevUncertainty):
@property
def supports_correlated(self):
return False
# Tests with data, covering:
# scalars, 1D, 2D and 3D
# broadcasting between them
@pytest.mark.parametrize(('data1', 'data2'), [
(np.array(5), np.array(10)),
(np.array(5), np.arange(10)),
(np.array(5), np.arange(10).reshape(2, 5)),
(np.arange(10), np.ones(10) * 2),
(np.arange(10), np.ones((10, 10)) * 2),
(np.arange(10).reshape(2, 5), np.ones((2, 5)) * 3),
(np.arange(1000).reshape(20, 5, 10),
np.ones((20, 5, 10)) * 3)
])
def test_arithmetics_data(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition
nd3 = nd1.add(nd2)
assert_array_equal(data1+data2, nd3.data)
# Subtraction
nd4 = nd1.subtract(nd2)
assert_array_equal(data1-data2, nd4.data)
# Multiplication
nd5 = nd1.multiply(nd2)
assert_array_equal(data1*data2, nd5.data)
# Division
nd6 = nd1.divide(nd2)
assert_array_equal(data1/data2, nd6.data)
for nd in [nd3, nd4, nd5, nd6]:
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Invalid arithmetic operations for data covering:
# not broadcastable data
def test_arithmetics_data_invalid():
nd1 = NDDataArithmetic([1, 2, 3])
nd2 = NDDataArithmetic([1, 2])
with pytest.raises(ValueError):
nd1.add(nd2)
# Tests with data and unit, covering:
# identical units (even dimensionless unscaled vs. no unit),
# equivalent units (such as meter and kilometer)
# equivalent composite units (such as m/s and km/h)
@pytest.mark.parametrize(('data1', 'data2'), [
(np.array(5) * u.s, np.array(10) * u.s),
(np.array(5) * u.s, np.arange(10) * u.h),
(np.array(5) * u.s, np.arange(10).reshape(2, 5) * u.min),
(np.arange(10) * u.m / u.s, np.ones(10) * 2 * u.km / u.s),
(np.arange(10) * u.m / u.s, np.ones((10, 10)) * 2 * u.m / u.h),
(np.arange(10).reshape(2, 5) * u.m / u.s,
np.ones((2, 5)) * 3 * u.km / u.h),
(np.arange(1000).reshape(20, 5, 10),
np.ones((20, 5, 10)) * 3 * u.dimensionless_unscaled),
(np.array(5), np.array(10) * u.s / u.h),
])
def test_arithmetics_data_unit_identical(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition
nd3 = nd1.add(nd2)
ref = data1 + data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd3.data)
assert nd3.unit == ref_unit
# Subtraction
nd4 = nd1.subtract(nd2)
ref = data1 - data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd4.data)
assert nd4.unit == ref_unit
# Multiplication
nd5 = nd1.multiply(nd2)
ref = data1 * data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd5.data)
assert nd5.unit == ref_unit
# Division
nd6 = nd1.divide(nd2)
ref = data1 / data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd6.data)
assert nd6.unit == ref_unit
for nd in [nd3, nd4, nd5, nd6]:
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Tests with data and unit, covering:
# not identical not convertible units
# one with unit (which is not dimensionless) and one without
@pytest.mark.parametrize(('data1', 'data2'), [
(np.array(5) * u.s, np.array(10) * u.m),
(np.array(5) * u.Mpc, np.array(10) * u.km / u.s),
(np.array(5) * u.Mpc, np.array(10)),
(np.array(5), np.array(10) * u.s),
])
def test_arithmetics_data_unit_not_identical(data1, data2):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
# Addition should not be possible
with pytest.raises(UnitsError):
nd1.add(nd2)
# Subtraction should not be possible
with pytest.raises(UnitsError):
nd1.subtract(nd2)
# Multiplication is possible
nd3 = nd1.multiply(nd2)
ref = data1 * data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd3.data)
assert nd3.unit == ref_unit
# Division is possible
nd4 = nd1.divide(nd2)
ref = data1 / data2
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd4.data)
assert nd4.unit == ref_unit
for nd in [nd3, nd4]:
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Tests with wcs (not very sensible because there is no real operation
# between wcs attributes), covering:
# both set and identical/not identical
# one set
# None set
@pytest.mark.parametrize(('wcs1', 'wcs2'), [
(None, None),
(None, 5),
(5, None),
(5, 5),
(7, 5),
])
def test_arithmetics_data_wcs(wcs1, wcs2):
nd1 = NDDataArithmetic(1, wcs=wcs1)
nd2 = NDDataArithmetic(1, wcs=wcs2)
if wcs1 is None and wcs2 is None:
ref_wcs = None
elif wcs1 is None:
ref_wcs = wcs2
elif wcs2 is None:
ref_wcs = wcs1
else:
ref_wcs = wcs1
# Addition
nd3 = nd1.add(nd2)
assert ref_wcs == nd3.wcs
# Subtraction
nd4 = nd1.subtract(nd2)
    assert ref_wcs == nd4.wcs
# Multiplication
nd5 = nd1.multiply(nd2)
    assert ref_wcs == nd5.wcs
# Division
nd6 = nd1.divide(nd2)
    assert ref_wcs == nd6.wcs
for nd in [nd3, nd4, nd5, nd6]:
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.mask is None
# Masks are handled completely separately from the data in the arithmetic,
# so we need no correlation tests, but we cover:
# masks 1D, 2D and mixed cases with broadcasting
@pytest.mark.parametrize(('mask1', 'mask2'), [
(None, None),
(None, False),
(True, None),
(False, False),
(True, False),
(False, True),
(True, True),
(np.array(False), np.array(True)),
(np.array(False), np.array([0, 1, 0, 1, 1], dtype=np.bool_)),
(np.array(True),
np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)),
(np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([1, 1, 0, 0, 1], dtype=np.bool_)),
(np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_)),
(np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)),
])
def test_arithmetics_data_masks(mask1, mask2):
nd1 = NDDataArithmetic(1, mask=mask1)
nd2 = NDDataArithmetic(1, mask=mask2)
if mask1 is None and mask2 is None:
ref_mask = None
elif mask1 is None:
ref_mask = mask2
elif mask2 is None:
ref_mask = mask1
else:
ref_mask = mask1 | mask2
# Addition
nd3 = nd1.add(nd2)
assert_array_equal(ref_mask, nd3.mask)
# Subtraction
nd4 = nd1.subtract(nd2)
assert_array_equal(ref_mask, nd4.mask)
# Multiplication
nd5 = nd1.multiply(nd2)
assert_array_equal(ref_mask, nd5.mask)
# Division
nd6 = nd1.divide(nd2)
assert_array_equal(ref_mask, nd6.mask)
for nd in [nd3, nd4, nd5, nd6]:
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.wcs is None
# One additional case which cannot easily be incorporated in the test above:
# what happens if the masks are numpy ndarrays that are not broadcastable
def test_arithmetics_data_masks_invalid():
nd1 = NDDataArithmetic(1, mask=np.array([1, 0], dtype=np.bool_))
nd2 = NDDataArithmetic(1, mask=np.array([1, 0, 1], dtype=np.bool_))
with pytest.raises(ValueError):
nd1.add(nd2)
with pytest.raises(ValueError):
nd1.multiply(nd2)
with pytest.raises(ValueError):
nd1.subtract(nd2)
with pytest.raises(ValueError):
nd1.divide(nd2)
# Covering:
# both have uncertainties (data and uncertainty without unit)
# tested against manually determined resulting uncertainties to verify the
# implemented formulas
# this test only works as long as data1 and data2 do not contain any 0
def test_arithmetics_stddevuncertainty_basic():
nd1 = NDDataArithmetic([1, 2, 3], uncertainty=StdDevUncertainty([1, 1, 3]))
nd2 = NDDataArithmetic([2, 2, 2], uncertainty=StdDevUncertainty([2, 2, 2]))
nd3 = nd1.add(nd2)
nd4 = nd2.add(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(np.array([1, 1, 3])**2 + np.array([2, 2, 2])**2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2)
nd4 = nd2.subtract(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty (same as for add)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2)
nd4 = nd2.multiply(nd1)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.abs(np.array([2, 4, 6])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3]))**2 +
(np.array([2, 2, 2]) / np.array([2, 2, 2]))**2)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2)
nd4 = nd2.divide(nd1)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = np.abs(np.array([1/2, 2/2, 3/2])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3]))**2 +
(np.array([2, 2, 2]) / np.array([2, 2, 2]))**2)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = np.abs(np.array([2, 1, 2/3])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3]))**2 +
(np.array([2, 2, 2]) / np.array([2, 2, 2]))**2)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(('cor', 'uncert1', 'data2'), [
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
])
def test_arithmetics_stddevuncertainty_basic_with_correlation(
cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1)
uncert2 = np.array([2, 2, 2])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(uncert1**2 + uncert2**2 +
2 * cor * np.abs(uncert1 * uncert2))
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(uncert1**2 + uncert2**2 -
2 * cor * np.abs(uncert1 * uncert2))
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (np.abs(data1 * data2)) * np.sqrt(
(uncert1 / data1)**2 + (uncert2 / data2)**2 +
(2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2)))
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (np.abs(data1 / data2)) * np.sqrt(
(uncert1 / data1)**2 + (uncert2 / data2)**2 -
(2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2)))
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (np.abs(data2 / data1)) * np.sqrt(
(uncert1 / data1)**2 + (uncert2 / data2)**2 -
(2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2)))
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(('cor', 'uncert1', 'data2'), [
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
])
def test_arithmetics_varianceuncertainty_basic_with_correlation(
cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1)**2
uncert2 = np.array([2, 2, 2])**2
nd1 = NDDataArithmetic(data1, uncertainty=VarianceUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=VarianceUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (uncert1 + uncert2 +
2 * cor * np.sqrt(uncert1 * uncert2))
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (uncert1 + uncert2 -
2 * cor * np.sqrt(uncert1 * uncert2))
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (data1 * data2)**2 * (
uncert1 / data1**2 + uncert2 / data2**2 +
(2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2)))
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
ref_common = (
uncert1 / data1**2 + uncert2 / data2**2 -
(2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2)))
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (data1 / data2)**2 * ref_common
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (data2 / data1)**2 * ref_common
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(('cor', 'uncert1', 'data2'), [
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
])
def test_arithmetics_inversevarianceuncertainty_basic_with_correlation(
cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = 1 / np.array(uncert1)**2
uncert2 = 1 / np.array([2, 2, 2])**2
nd1 = NDDataArithmetic(data1, uncertainty=InverseVariance(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=InverseVariance(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
    ref_uncertainty = 1 / (1 / uncert1 + 1 / uncert2 +
2 * cor / np.sqrt(uncert1 * uncert2))
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / (1 / uncert1 + 1 / uncert2 -
2 * cor / np.sqrt(uncert1 * uncert2))
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = 1 / ((data1 * data2)**2 * (
1 / uncert1 / data1**2 + 1 / uncert2 / data2**2 +
(2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))))
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
ref_common = (
1 / uncert1 / data1**2 + 1 / uncert2 / data2**2 -
(2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2)))
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = 1 / ((data1 / data2)**2 * ref_common)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = 1 / ((data2 / data1)**2 * ref_common)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Covering:
# just an example that an np.ndarray works as correlation; no checks for
# the right result since those were basically done in the functions above.
def test_arithmetics_stddevuncertainty_basic_with_correlation_array():
data1 = np.array([1, 2, 3])
data2 = np.array([1, 1, 1])
uncert1 = np.array([1, 1, 1])
uncert2 = np.array([2, 2, 2])
cor = np.array([0, 0.25, 0])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd1.add(nd2, uncertainty_correlation=cor)
# Covering:
# That propagate throws an exception when correlation is given but the
# uncertainty does not support correlation.
def test_arithmetics_with_correlation_unsupported():
data1 = np.array([1, 2, 3])
data2 = np.array([1, 1, 1])
uncert1 = np.array([1, 1, 1])
uncert2 = np.array([2, 2, 2])
cor = 3
nd1 = NDDataArithmetic(data1,
uncertainty=StdDevUncertaintyUncorrelated(uncert1))
nd2 = NDDataArithmetic(data2,
uncertainty=StdDevUncertaintyUncorrelated(uncert2))
with pytest.raises(ValueError):
nd1.add(nd2, uncertainty_correlation=cor)
# Covering:
# only one has an uncertainty (data and uncertainty without unit)
# tested against the case where the other one has zero uncertainty (this case
# must be correct because it was tested above).
# Also verify that if the result of the data has negative values the resulting
# uncertainty has no negative values.
def test_arithmetics_stddevuncertainty_one_missing():
nd1 = NDDataArithmetic([1, -2, 3])
nd1_ref = NDDataArithmetic([1, -2, 3],
uncertainty=StdDevUncertainty([0, 0, 0]))
nd2 = NDDataArithmetic([2, 2, -2],
uncertainty=StdDevUncertainty([2, 2, 2]))
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2.add(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2.subtract(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2.multiply(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2.divide(nd1_ref)
assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.parametrize(('uncert1', 'uncert2'), [
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
])
def test_arithmetics_stddevuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
    if uncert1 is not None:
        # Check the raw input (before wrapping) so Quantity uncertainties
        # are converted to the data unit for the reference.
        if isinstance(uncert1, Quantity):
            uncert1_ref = uncert1.to_value(data1.unit)
        else:
            uncert1_ref = uncert1
        uncert_ref1 = StdDevUncertainty(uncert1_ref, copy=True)
        uncert1 = StdDevUncertainty(uncert1)
else:
uncert1 = None
uncert_ref1 = None
    if uncert2 is not None:
        # Same conversion for the second uncertainty.
        if isinstance(uncert2, Quantity):
            uncert2_ref = uncert2.to_value(data2.unit)
        else:
            uncert2_ref = uncert2
        uncert_ref2 = StdDevUncertainty(uncert2_ref, copy=True)
        uncert2 = StdDevUncertainty(uncert2)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.parametrize(('uncert1', 'uncert2'), [
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
])
def test_arithmetics_varianceuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
    if uncert1 is not None:
        uncert1 = uncert1**2
        # Check before wrapping so Quantity variances are converted to the
        # squared data unit for the reference.
        if isinstance(uncert1, Quantity):
            uncert1_ref = uncert1.to_value(data1.unit**2)
        else:
            uncert1_ref = uncert1
        uncert_ref1 = VarianceUncertainty(uncert1_ref, copy=True)
        uncert1 = VarianceUncertainty(uncert1)
else:
uncert1 = None
uncert_ref1 = None
    if uncert2 is not None:
        uncert2 = uncert2**2
        if isinstance(uncert2, Quantity):
            uncert2_ref = uncert2.to_value(data2.unit**2)
        else:
            uncert2_ref = uncert2
        uncert_ref2 = VarianceUncertainty(uncert2_ref, copy=True)
        uncert2 = VarianceUncertainty(uncert2)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.parametrize(('uncert1', 'uncert2'), [
(np.array([1, 2, 3]) * u.m, None),
(np.array([1, 2, 3]) * u.cm, None),
(None, np.array([1, 2, 3]) * u.m),
(None, np.array([1, 2, 3]) * u.cm),
(np.array([1, 2, 3]), np.array([2, 3, 4])),
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
(np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
(np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
])
def test_arithmetics_inversevarianceuncertainty_with_units(uncert1, uncert2):
# Data has same units
data1 = np.array([1, 2, 3]) * u.m
data2 = np.array([-4, 7, 0]) * u.m
    if uncert1 is not None:
        uncert1 = 1 / uncert1**2
        # Check before wrapping so Quantity inverse variances are converted
        # to the inverse squared data unit for the reference.
        if isinstance(uncert1, Quantity):
            uncert1_ref = uncert1.to_value(1 / data1.unit**2)
        else:
            uncert1_ref = uncert1
        uncert_ref1 = InverseVariance(uncert1_ref, copy=True)
        uncert1 = InverseVariance(uncert1)
else:
uncert1 = None
uncert_ref1 = None
    if uncert2 is not None:
        uncert2 = 1 / uncert2**2
        if isinstance(uncert2, Quantity):
            uncert2_ref = uncert2.to_value(1 / data2.unit**2)
        else:
            uncert2_ref = uncert2
        uncert_ref2 = InverseVariance(uncert2_ref, copy=True)
        uncert2 = InverseVariance(uncert2)
else:
uncert2 = None
uncert_ref2 = None
nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
# Let's start the tests
# Addition
nd3 = nd1.add(nd2)
nd3_ref = nd1_ref.add(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.add(nd1)
nd3_ref = nd2_ref.add(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Subtraction
nd3 = nd1.subtract(nd2)
nd3_ref = nd1_ref.subtract(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.subtract(nd1)
nd3_ref = nd2_ref.subtract(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Multiplication
nd3 = nd1.multiply(nd2)
nd3_ref = nd1_ref.multiply(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.multiply(nd1)
nd3_ref = nd2_ref.multiply(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Division
nd3 = nd1.divide(nd2)
nd3_ref = nd1_ref.divide(nd2_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
nd3 = nd2.divide(nd1)
nd3_ref = nd2_ref.divide(nd1_ref)
assert nd3.unit == nd3_ref.unit
assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
    assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array)
# Test abbreviation and long name for taking the first found meta, mask, wcs
@pytest.mark.parametrize(('use_abbreviation'), ['ff', 'first_found'])
def test_arithmetics_handle_switches(use_abbreviation):
meta1 = {'a': 1}
meta2 = {'b': 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1 = 5
wcs2 = 100
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1,
uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2,
uncertainty=uncertainty2)
nd3 = NDDataArithmetic(data1)
# Both have the attributes but option None is chosen
nd_ = nd1.add(nd2, propagate_uncertainties=None, handle_meta=None,
handle_mask=None, compare_wcs=None)
assert nd_.wcs is None
assert len(nd_.meta) == 0
assert nd_.mask is None
assert nd_.uncertainty is None
    # Only the second operand has the attributes and 'first_found' is chosen
nd_ = nd3.add(nd2, propagate_uncertainties=False,
handle_meta=use_abbreviation, handle_mask=use_abbreviation,
compare_wcs=use_abbreviation)
assert nd_.wcs == wcs2
assert nd_.meta == meta2
assert nd_.mask == mask2
assert_array_equal(nd_.uncertainty.array, uncertainty2.array)
    # Only the first operand has the attributes and 'first_found' is chosen
nd_ = nd1.add(nd3, propagate_uncertainties=False,
handle_meta=use_abbreviation, handle_mask=use_abbreviation,
compare_wcs=use_abbreviation)
assert nd_.wcs == wcs1
assert nd_.meta == meta1
assert nd_.mask == mask1
assert_array_equal(nd_.uncertainty.array, uncertainty1.array)
def test_arithmetics_meta_func():
def meta_fun_func(meta1, meta2, take='first'):
if take == 'first':
return meta1
else:
return meta2
meta1 = {'a': 1}
meta2 = {'a': 3, 'b': 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1 = 5
wcs2 = 100
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1,
uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2,
uncertainty=uncertainty2)
nd3 = nd1.add(nd2, handle_meta=meta_fun_func)
assert nd3.meta['a'] == 1
assert 'b' not in nd3.meta
nd4 = nd1.add(nd2, handle_meta=meta_fun_func, meta_take='second')
assert nd4.meta['a'] == 3
assert nd4.meta['b'] == 2
with pytest.raises(KeyError):
nd1.add(nd2, handle_meta=meta_fun_func, take='second')
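# A further illustrative handler (hypothetical, not exercised by the test
# above): any callable accepting two meta dicts can be passed as handle_meta,
# e.g. a simple merge that prefers the first operand on key conflicts:
#
#     def meta_merge_func(meta1, meta2):
#         merged = dict(meta2)
#         merged.update(meta1)
#         return merged
#
#     nd1.add(nd2, handle_meta=meta_merge_func)  # meta == {'a': 1, 'b': 2}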
def test_arithmetics_wcs_func():
def wcs_comp_func(wcs1, wcs2, tolerance=0.1):
if abs(wcs1 - wcs2) <= tolerance:
return True
else:
return False
meta1 = {'a': 1}
meta2 = {'a': 3, 'b': 2}
mask1 = True
mask2 = False
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1 = 99.99
wcs2 = 100
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1,
uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2,
uncertainty=uncertainty2)
nd3 = nd1.add(nd2, compare_wcs=wcs_comp_func)
assert nd3.wcs == 99.99
with pytest.raises(ValueError):
nd1.add(nd2, compare_wcs=wcs_comp_func, wcs_tolerance=0.00001)
with pytest.raises(KeyError):
nd1.add(nd2, compare_wcs=wcs_comp_func, tolerance=1)
def test_arithmetics_mask_func():
def mask_sad_func(mask1, mask2, fun=0):
if fun > 0.5:
return mask2
else:
return mask1
meta1 = {'a': 1}
meta2 = {'a': 3, 'b': 2}
mask1 = [True, False, True]
mask2 = [True, False, False]
uncertainty1 = StdDevUncertainty([1, 2, 3])
uncertainty2 = StdDevUncertainty([1, 2, 3])
wcs1 = 99.99
wcs2 = 100
data1 = [1, 1, 1]
data2 = [1, 1, 1]
nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1,
uncertainty=uncertainty1)
nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2,
uncertainty=uncertainty2)
nd3 = nd1.add(nd2, handle_mask=mask_sad_func)
assert_array_equal(nd3.mask, nd1.mask)
nd4 = nd1.add(nd2, handle_mask=mask_sad_func, mask_fun=1)
assert_array_equal(nd4.mask, nd2.mask)
with pytest.raises(KeyError):
nd1.add(nd2, handle_mask=mask_sad_func, fun=1)
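# For reference (illustrative sketch, not part of the test above): the common
# behavior of combining masks with a logical OR, so a pixel is masked if it is
# masked in either operand, can also be expressed as a custom handler:
#
#     def mask_or_func(mask1, mask2):
#         return np.logical_or(mask1, mask2)
#
#     nd1.add(nd2, handle_mask=mask_or_func)  # mask == [True, False, True]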
@pytest.mark.parametrize('meth', ['add', 'subtract', 'divide', 'multiply'])
def test_two_argument_usage(meth):
ndd1 = NDDataArithmetic(np.ones((3, 3)))
ndd2 = NDDataArithmetic(np.ones((3, 3)))
    # Call the method on the class (not the instance) and compare it with the
    # already tested usage:
ndd3 = getattr(NDDataArithmetic, meth)(ndd1, ndd2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
# And the same done on an unrelated instance...
ndd3 = getattr(NDDataArithmetic(-100), meth)(ndd1, ndd2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
@pytest.mark.parametrize('meth', ['add', 'subtract', 'divide', 'multiply'])
def test_two_argument_usage_non_nddata_first_arg(meth):
data1 = 50
data2 = 100
    # Call the method on the class (not the instance)
ndd3 = getattr(NDDataArithmetic, meth)(data1, data2)
    # Compare it with the instance usage and two identical NDData-like
# classes:
ndd1 = NDDataArithmetic(data1)
ndd2 = NDDataArithmetic(data2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
# and check it's also working when called on an instance
ndd3 = getattr(NDDataArithmetic(-100), meth)(data1, data2)
ndd4 = getattr(ndd1, meth)(ndd2)
np.testing.assert_array_equal(ndd3.data, ndd4.data)
def test_arithmetics_unknown_uncertainties():
# Not giving any uncertainty class means it is saved as UnknownUncertainty
ndd1 = NDDataArithmetic(np.ones((3, 3)),
uncertainty=UnknownUncertainty(np.ones((3, 3))))
ndd2 = NDDataArithmetic(np.ones((3, 3)),
uncertainty=UnknownUncertainty(np.ones((3, 3))*2))
# There is no way to propagate uncertainties:
with pytest.raises(IncompatibleUncertaintiesException):
ndd1.add(ndd2)
# But it should be possible without propagation
ndd3 = ndd1.add(ndd2, propagate_uncertainties=False)
np.testing.assert_array_equal(ndd1.uncertainty.array,
ndd3.uncertainty.array)
ndd4 = ndd1.add(ndd2, propagate_uncertainties=None)
assert ndd4.uncertainty is None
|
4d592fe3c5797a06a88e0a938bdf14326ee2d19110d849ee6afe92a44511e44c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy._erfa import core as erfa, ufunc as erfa_ufunc
from astropy.tests.helper import catch_warnings
from astropy.utils.compat import NUMPY_LT_1_16
def test_output_dim_3_signature():
if NUMPY_LT_1_16:
assert erfa_ufunc.c2i00a.signature == "(),()->(d3, d3)"
else:
assert erfa_ufunc.c2i00a.signature == "(),()->(3, 3)"
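# Background (illustrative, not an assertion about this module): generalized
# ufunc signatures describe core dimensions, and fixed-size ("frozen") core
# dimensions such as (3, 3) were only introduced in numpy 1.16, hence the
# version switch above. For example, numpy's own matmul gufunc reports:
#
#     >>> np.matmul.signature
#     '(n?,k),(k,m?)->(n?,m?)'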
def test_erfa_wrapper():
"""
Runs a set of tests that mostly make sure vectorization is
working as expected
"""
jd = np.linspace(2456855.5, 2456855.5+1.0/24.0/60.0, 60*2+1)
ra = np.linspace(0.0, np.pi*2.0, 5)
dec = np.linspace(-np.pi/2.0, np.pi/2.0, 4)
aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd, 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
assert aob.shape == (121,)
aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd[0], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
assert aob.shape == ()
aob, zob, hob, dob, rob, eo = erfa.atco13(ra[:, None, None], dec[None, :, None], 0.0, 0.0, 0.0, 0.0, jd[None, None, :], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
    assert aob.shape == (5, 4, 121)
iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd, 0.0)
assert iy.shape == (121,)
assert ihmsf.shape == (121,)
assert ihmsf.dtype == erfa.dt_hmsf
iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd[0], 0.0)
assert iy.shape == ()
assert ihmsf.shape == ()
assert ihmsf.dtype == erfa.dt_hmsf
def test_angle_ops():
sign, idmsf = erfa.a2af(6, -np.pi)
assert sign == b'-'
assert idmsf.item() == (180, 0, 0, 0)
sign, ihmsf = erfa.a2tf(6, np.pi)
assert sign == b'+'
assert ihmsf.item() == (12, 0, 0, 0)
rad = erfa.af2a('-', 180, 0, 0.0)
np.testing.assert_allclose(rad, -np.pi)
rad = erfa.tf2a('+', 12, 0, 0.0)
np.testing.assert_allclose(rad, np.pi)
rad = erfa.anp(3.*np.pi)
np.testing.assert_allclose(rad, np.pi)
rad = erfa.anpm(3.*np.pi)
np.testing.assert_allclose(rad, -np.pi)
sign, ihmsf = erfa.d2tf(1, -1.5)
assert sign == b'-'
assert ihmsf.item() == (36, 0, 0, 0)
days = erfa.tf2d('+', 3, 0, 0.0)
np.testing.assert_allclose(days, 0.125)
def test_spherical_cartesian():
theta, phi = erfa.c2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
np.testing.assert_allclose(r, 2.0)
pv = np.array(([0.0, np.sqrt(2.0), np.sqrt(2.0)], [1.0, 0.0, 0.0]),
dtype=erfa.dt_pv)
theta, phi, r, td, pd, rd = erfa.pv2s(pv)
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
np.testing.assert_allclose(r, 2.0)
np.testing.assert_allclose(td, -np.sqrt(2.0)/2.0)
np.testing.assert_allclose(pd, 0.0)
np.testing.assert_allclose(rd, 0.0)
c = erfa.s2c(np.pi/2.0, np.pi/4.0)
np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14)
c = erfa.s2p(np.pi/2.0, np.pi/4.0, 1.0)
np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14)
pv = erfa.s2pv(np.pi/2.0, np.pi/4.0, 2.0, np.sqrt(2.0)/2.0, 0.0, 0.0)
np.testing.assert_allclose(pv['p'], [0.0, np.sqrt(2.0), np.sqrt(2.0)], atol=1e-14)
np.testing.assert_allclose(pv['v'], [-1.0, 0.0, 0.0], atol=1e-14)
def test_errwarn_reporting():
"""
Test that the ERFA error reporting mechanism works as it should
"""
# no warning
erfa.dat(1990, 1, 1, 0.5)
# check warning is raised for a scalar
with catch_warnings() as w:
erfa.dat(100, 1, 1, 0.5)
assert len(w) == 1
assert w[0].category == erfa.ErfaWarning
assert '1 of "dubious year (Note 1)"' in str(w[0].message)
# and that the count is right for a vector.
with catch_warnings() as w:
erfa.dat([100, 200, 1990], 1, 1, 0.5)
assert len(w) == 1
assert w[0].category == erfa.ErfaWarning
assert '2 of "dubious year (Note 1)"' in str(w[0].message)
    try:
        erfa.dat(1990, [1, 34, 2], [1, 1, 43], 0.5)
    except erfa.ErfaError as e:
        if '1 of "bad day (Note 3)", 1 of "bad month"' not in e.args[0]:
            assert False, 'Raised the correct type of error, but wrong message: ' + e.args[0]
    else:
        assert False, 'Expected an ErfaError for the invalid days/months'
    try:
        erfa.dat(200, [1, 34, 2], [1, 1, 43], 0.5)
    except erfa.ErfaError as e:
        if 'warning' in e.args[0]:
            assert False, 'Raised the correct type of error, but there were warnings mixed in: ' + e.args[0]
    else:
        assert False, 'Expected an ErfaError for the invalid days/months'
def test_vector_inouts():
"""
    Tests that vector inputs to ERFA functions are correctly consumed and
    that vector outputs are correctly produced
"""
# values are from test_erfa.c t_ab function
pnat = [-0.76321968546737951,
-0.60869453983060384,
-0.21676408580639883]
v = [2.1044018893653786e-5,
-8.9108923304429319e-5,
-3.8633714797716569e-5]
s = 0.99980921395708788
bm1 = 0.99999999506209258
expected = [-0.7631631094219556269,
-0.6087553082505590832,
-0.2167926269368471279]
res = erfa.ab(pnat, v, s, bm1)
assert res.shape == (3,)
np.testing.assert_allclose(res, expected)
res2 = erfa.ab([pnat]*4, v, s, bm1)
assert res2.shape == (4, 3)
np.testing.assert_allclose(res2, [expected]*4)
# here we stride an array and also do it Fortran-order to make sure
# it all still works correctly with non-contig arrays
pnata = np.array(pnat)
arrin = np.array([pnata, pnata/2, pnata/3, pnata/4, pnata/5]*4, order='F')
res3 = erfa.ab(arrin[::5], v, s, bm1)
assert res3.shape == (4, 3)
np.testing.assert_allclose(res3, [expected]*4)
def test_pv_in():
jd1 = 2456165.5
jd2 = 0.401182685
pv = np.empty((), dtype=erfa.dt_pv)
pv['p'] = [-6241497.16,
401346.896,
-1251136.04]
pv['v'] = [-29.264597,
-455.021831,
0.0266151194]
astrom = erfa.apcs13(jd1, jd2, pv)
assert astrom.shape == ()
# values from t_erfa_c
np.testing.assert_allclose(astrom['pmt'], 12.65133794027378508)
np.testing.assert_allclose(astrom['em'], 1.010428384373318379)
np.testing.assert_allclose(astrom['eb'], [0.9012691529023298391,
-.4173999812023068781,
-.1809906511146821008])
np.testing.assert_allclose(astrom['bpn'], np.eye(3))
# first make sure it *fails* if we mess with the input orders
pvbad = np.empty_like(pv)
pvbad['p'], pvbad['v'] = pv['v'], pv['p']
astrombad = erfa.apcs13(jd1, jd2, pvbad)
assert not np.allclose(astrombad['em'], 1.010428384373318379)
pvarr = np.array([pv]*3)
astrom2 = erfa.apcs13(jd1, jd2, pvarr)
assert astrom2.shape == (3,)
np.testing.assert_allclose(astrom2['em'], 1.010428384373318379)
# try striding of the input array to make non-contiguous
pvmatarr = np.array([pv]*9)[::3]
astrom3 = erfa.apcs13(jd1, jd2, pvmatarr)
assert astrom3.shape == (3,)
np.testing.assert_allclose(astrom3['em'], 1.010428384373318379)
def test_structs():
"""
Checks producing and consuming of ERFA c structs
"""
am, eo = erfa.apci13(2456165.5, [0.401182685, 1])
assert am.shape == (2, )
assert am.dtype == erfa.dt_eraASTROM
assert eo.shape == (2, )
# a few spotchecks from test_erfa.c
np.testing.assert_allclose(am[0]['pmt'], 12.65133794027378508)
np.testing.assert_allclose(am[0]['v'], [0.4289638897157027528e-4,
0.8115034002544663526e-4,
0.3517555122593144633e-4])
ri, di = erfa.atciqz(2.71, 0.174, am[0])
np.testing.assert_allclose(ri, 2.709994899247599271)
np.testing.assert_allclose(di, 0.1728740720983623469)
|
0318f2b888d50602785e3ccdb03eb51374992d751576603b77aac43103c76527 | from .low_level_api import *
from .high_level_api import *
from .high_level_wcs_wrapper import *
from .sliced_low_level_wcs import *
|
6ad22f24633d55b2ba54b300485c24ce74d48b7f5078c2aebe9e1680245d89eb | from .high_level_api import HighLevelWCSMixin
from . import BaseLowLevelWCS
__all__ = ['HighLevelWCSWrapper']
class HighLevelWCSWrapper(HighLevelWCSMixin):
"""
Wrapper class that can take any :class:`~astropy.wcs.wcsapi.BaseLowLevelWCS`
object and expose the high-level WCS API.
"""
def __init__(self, low_level_wcs):
if not isinstance(low_level_wcs, BaseLowLevelWCS):
raise TypeError('Input to a HighLevelWCSWrapper must be a low level WCS object')
self._low_level_wcs = low_level_wcs
@property
def low_level_wcs(self):
return self._low_level_wcs
@property
def pixel_n_dim(self):
"""
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
"""
return self.low_level_wcs.pixel_n_dim
@property
def world_n_dim(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`
"""
return self.low_level_wcs.world_n_dim
@property
def world_axis_physical_types(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`
"""
return self.low_level_wcs.world_axis_physical_types
@property
def world_axis_units(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`
"""
return self.low_level_wcs.world_axis_units
@property
def array_shape(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape`
"""
return self.low_level_wcs.array_shape
@property
def pixel_bounds(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_bounds`
"""
return self.low_level_wcs.pixel_bounds
@property
def axis_correlation_matrix(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.axis_correlation_matrix`
"""
        return self.low_level_wcs.axis_correlation_matrix
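# A minimal usage sketch (``my_low_level_wcs`` is a hypothetical object
# implementing BaseLowLevelWCS); wrapping it exposes the high-level methods
# provided by HighLevelWCSMixin, such as pixel_to_world and world_to_pixel:
#
#     wrapper = HighLevelWCSWrapper(my_low_level_wcs)
#     world = wrapper.pixel_to_world(10, 20)
#     pixel = wrapper.world_to_pixel(world)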
|
4c8d43623299b774ce71b0020e71b36902f98af99e8ba0634d2ba82d25a79285 | import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
        An iterable of strings giving the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in order to get numerical
values.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
        * The second element should be a tuple specifying the positional
          arguments required to initialize the class. If
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, this
          tuple should include `None` placeholders for the world coordinates.
* The last tuple element must be a dictionary with the keyword
arguments required to initialize the class.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
return None
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` ``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
return None
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that indicates using booleans
whether a given world coordinate depends on a given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence of
any further information. For completely independent axes, the diagonal
would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def __str__(self):
# Overall header
s = '{0} Transformation\n\n'.format(self.__class__.__name__)
s += ('This transformation has {0} pixel and {1} world dimensions\n\n'
.format(self.pixel_n_dim, self.world_n_dim))
s += 'Array shape (Numpy order): {0}\n\n'.format(self.array_shape)
# Pixel dimensions table
array_shape = self.array_shape or (0,)
pixel_shape = self.pixel_shape or (None,) * self.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(self.pixel_n_dim)))
pixel_siz_width = max(9, len(str(max(array_shape))))
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
for ipix in range(self.pixel_n_dim):
s += (('{0:' + str(pixel_dim_width) + 'd}').format(ipix) + ' ' +
(" "*5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'd}').format(pixel_shape[ipix])) + ' ' +
'{0:s}'.format(str(None if self.pixel_bounds is None else self.pixel_bounds[ipix]) + '\n'))
s += '\n'
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(self.world_n_dim)))
        world_typ_width = max(13, max(len(str(x)) for x in self.world_axis_physical_types))
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
for iwrl in range(self.world_n_dim):
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
                  ('{0:' + str(world_typ_width) + 's}').format(str(self.world_axis_physical_types[iwrl])) + ' ' +
'{0:s}'.format(self.world_axis_units[iwrl] + '\n'))
s += '\n'
# Axis correlation matrix
pixel_dim_width = max(3, len(str(self.world_n_dim)))
s += 'Correlation between pixel and world axes:\n\n'
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(self.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(self.pixel_n_dim)]) +
'\n')
matrix = self.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype='U3')
matrix_str[matrix] = 'yes'
matrix_str[~matrix] = 'no'
for iwrl in range(self.world_n_dim):
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(self.pixel_n_dim)]) +
'\n')
# Make sure we get rid of the extra whitespace at the end of some lines
return '\n'.join([l.rstrip() for l in s.splitlines()])
__repr__ = __str__
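# --- Illustrative sketch (not part of the public API) ---
# A minimal concrete subclass, assuming a single identity-transform axis, to
# show which members must be implemented to satisfy the abstract interface
# above. The object component/class entries are simple placeholders.
class _ExampleIdentityLowLevelWCS(BaseLowLevelWCS):

    @property
    def pixel_n_dim(self):
        return 1

    @property
    def world_n_dim(self):
        return 1

    @property
    def world_axis_physical_types(self):
        # No matching UCD1+ word, so use the "custom:" escape hatch
        return ['custom:example']

    @property
    def world_axis_units(self):
        return ['']

    def pixel_to_world_values(self, *pixel_arrays):
        # Identity: the world value equals the zero-based pixel coordinate
        return pixel_arrays[0]

    def array_index_to_world_values(self, *index_arrays):
        return index_arrays[0]

    def world_to_pixel_values(self, *world_arrays):
        return world_arrays[0]

    def world_to_array_index_values(self, *world_arrays):
        return np.asarray(np.floor(np.asarray(world_arrays[0]) + 0.5), dtype=int)

    @property
    def world_axis_object_components(self):
        return [('world', 0, 'value')]

    @property
    def world_axis_object_classes(self):
        # Placeholder tuple: (class, positional args, keyword args)
        return {'world': (float, (), {})}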
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
VALID_UCDS = set([x.strip() for x in f.read().splitlines()[1:]])
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError("Invalid physical type: {0}".format(physical_type))
|
3f7f579714aa6e6b94a41d7e9dd4c5a392715361845502354ea7bce5dd0f90a9 | import importlib
def deserialize_class(tpl, construct=True):
"""
Deserialize classes recursively.
"""
if not isinstance(tpl, tuple) or len(tpl) != 3:
raise ValueError("Expected a tuple of three values")
module, klass = tpl[0].rsplit('.', 1)
module = importlib.import_module(module)
klass = getattr(module, klass)
args = tuple([deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]])
kwargs = dict((key, deserialize_class(val)) if isinstance(val, tuple) else (key, val) for (key, val) in tpl[2].items())
if construct:
return klass(*args, **kwargs)
else:
return klass, args, kwargs
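# Usage sketch (values are illustrative):
#
#     >>> deserialize_class(('collections.OrderedDict', (), {}))
#     OrderedDict()
#     >>> deserialize_class(('datetime.timedelta', (1,), {'hours': 2}),
#     ...                   construct=False)
#     (<class 'datetime.timedelta'>, (1,), {'hours': 2})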
|
602a06ae7983b79440ea168b1c1431e0cfb6300c1a53378bb870731cc33a34fb | import numbers
import numpy as np
from astropy.wcs.wcsapi import BaseLowLevelWCS
__all__ = ['sanitize_slices', 'SlicedLowLevelWCS']
def sanitize_slices(slices, ndim):
"""
Given a set of input
"""
if not isinstance(slices, (tuple, list)): # We just have a single int
slices = (slices,)
slices = list(slices)
if Ellipsis in slices:
if slices.count(Ellipsis) > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
# Replace the Ellipsis with the correct number of slice(None)s
e_ind = slices.index(Ellipsis)
slices.remove(Ellipsis)
n_e = ndim - len(slices)
for i in range(n_e):
ind = e_ind + i
slices.insert(ind, slice(None))
for i in range(ndim):
if i < len(slices):
slc = slices[i]
if isinstance(slc, slice):
if slc.step and slc.step != 1:
raise ValueError("Slicing WCS with a step is not supported.")
elif not isinstance(slc, numbers.Integral):
raise ValueError("Only integer or range slices are accepted.")
else:
slices.append(slice(None))
return slices
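# Usage sketch (illustrative): integers and Ellipsis are normalized, and
# missing trailing dimensions are padded with slice(None):
#
#     >>> sanitize_slices(0, 2)
#     [0, slice(None, None, None)]
#     >>> sanitize_slices(Ellipsis, 2)
#     [slice(None, None, None), slice(None, None, None)]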
class SlicedLowLevelWCS(BaseLowLevelWCS):
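    """
    A light wrapper that applies a set of slices to another low-level WCS.

    Integer indices drop the corresponding (numpy-order) pixel dimension,
    together with any world dimensions that are correlated only with dropped
    pixel dimensions, while slices with a start offset shift the remaining
    pixel coordinates. For example (illustrative),
    ``SlicedLowLevelWCS(wcs, (0, slice(10, None)))`` drops the first array
    dimension and offsets the second by 10 pixels.
    """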
def __init__(self, wcs, slices):
self._wcs = wcs
self._slices_array = sanitize_slices(slices, self._wcs.pixel_n_dim)
self._slices_pixel = self._slices_array[::-1]
# figure out which pixel dimensions have been kept, then use axis correlation
# matrix to figure out which world dims are kept
self._pixel_keep = np.nonzero([not isinstance(self._slices_pixel[ip], numbers.Integral)
for ip in range(self._wcs.pixel_n_dim)])[0]
# axis_correlation_matrix[world, pixel]
self._world_keep = np.nonzero(
self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1))[0]
@property
def pixel_n_dim(self):
return len(self._pixel_keep)
@property
def world_n_dim(self):
return len(self._world_keep)
@property
def world_axis_physical_types(self):
return [self._wcs.world_axis_physical_types[i] for i in self._world_keep]
@property
def world_axis_units(self):
return [self._wcs.world_axis_units[i] for i in self._world_keep]
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays_new = []
ipix_curr = -1
for ipix in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipix], int):
pixel_arrays_new.append(self._slices_pixel[ipix])
else:
ipix_curr += 1
if self._slices_pixel[ipix].start is not None:
pixel_arrays_new.append(pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start)
else:
pixel_arrays_new.append(pixel_arrays[ipix_curr])
world_arrays = self._wcs.pixel_to_world_values(*pixel_arrays_new)
return [world_arrays[iw] for iw in self._world_keep]
def array_index_to_world_values(self, *index_arrays):
return self.pixel_to_world_values(*index_arrays[::-1])
def world_to_pixel_values(self, *world_arrays):
world_arrays_new = []
iworld_curr = -1
for iworld in range(self._wcs.world_n_dim):
if iworld in self._world_keep:
iworld_curr += 1
world_arrays_new.append(world_arrays[iworld_curr])
else:
world_arrays_new.append(1.)
pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new))
for ipixel in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipixel], slice) and self._slices_pixel[ipixel].start is not None:
pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start
return [pixel_arrays[ip] for ip in self._pixel_keep]
def world_to_array_index_values(self, *world_arrays):
        pixel_arrays = self.world_to_pixel_values(*world_arrays)[::-1]
        array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=int) for pixel in pixel_arrays)
return array_indices
@property
def world_axis_object_components(self):
return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep]
@property
def world_axis_object_classes(self):
keys_keep = [item[0] for item in self.world_axis_object_components]
return dict([item for item in self._wcs.world_axis_object_classes.items() if item[0] in keys_keep])
@property
def array_shape(self):
if self._wcs.array_shape:
return np.broadcast_to(0, self._wcs.array_shape)[tuple(self._slices_array)].shape
@property
def pixel_shape(self):
if self.array_shape:
return self.array_shape[::-1]
@property
def pixel_bounds(self):
if self._wcs.pixel_bounds is None:
return None
bounds = []
for idx in self._pixel_keep:
if self._slices_pixel[idx].start is None:
bounds.append(self._wcs.pixel_bounds[idx])
else:
imin, imax = self._wcs.pixel_bounds[idx]
start = self._slices_pixel[idx].start
bounds.append((imin - start, imax - start))
return bounds
@property
def axis_correlation_matrix(self):
        return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]
|
b7b222c5e2bb634c8fbeed21d1ce0aa7a432807c043a938ae2a0267f7e553d18 | import abc
from collections import defaultdict, OrderedDict
import numpy as np
from .utils import deserialize_class
__all__ = ['BaseHighLevelWCS', 'HighLevelWCSMixin']
def rec_getattr(obj, att):
for a in att.split('.'):
obj = getattr(obj, a)
return obj
def default_order(components):
order = []
for key, _, _ in components:
if key not in order:
order.append(key)
return order
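# For example (illustrative), for components
# [('celestial', 0, 'spherical.lon.degree'),
#  ('celestial', 1, 'spherical.lat.degree'),
#  ('spectral', 0, 'value')]
# default_order returns ['celestial', 'spectral'].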
class BaseHighLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
@abc.abstractmethod
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates (represented by high-level
objects).
If a single high-level object is used to represent the world
coordinates, it is returned as-is (not in a tuple/list), otherwise a
tuple of high-level objects is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
indexing and ordering conventions.
"""
@abc.abstractmethod
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world
coordinates, it is returned as-is (not in a tuple/list), otherwise a
tuple of high-level objects is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
@abc.abstractmethod
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
class HighLevelWCSMixin(BaseHighLevelWCS):
"""
Mix-in class that automatically provides the high-level WCS API for the
low-level WCS object given by the `~HighLevelWCSMixin.low_level_wcs`
property.
"""
@property
def low_level_wcs(self):
return self
def world_to_pixel(self, *world_objects):
# Cache the classes and components since this may be expensive
serialized_classes = self.low_level_wcs.world_axis_object_classes
components = self.low_level_wcs.world_axis_object_components
# Deserialize world_axis_object_classes using the default order
classes = OrderedDict()
for key in default_order(components):
if self.low_level_wcs.serialized_classes:
classes[key] = deserialize_class(serialized_classes[key],
construct=False)
else:
classes[key] = serialized_classes[key]
# Check that the number of classes matches the number of inputs
if len(world_objects) != len(classes):
raise ValueError("Number of world inputs ({0}) does not match "
"expected ({1})".format(len(world_objects), len(classes)))
# Determine whether the classes are uniquely matched, that is we check
# whether there is only one of each class.
world_by_key = {}
unique_match = True
for w in world_objects:
matches = []
for key, (klass, _, _) in classes.items():
if isinstance(w, klass):
matches.append(key)
if len(matches) == 1:
world_by_key[matches[0]] = w
else:
unique_match = False
break
# If the match is not unique, the order of the classes needs to match,
# whereas if all classes are unique, we can still intelligently match
# them even if the order is wrong.
objects = {}
if unique_match:
for key, (klass, args, kwargs) in classes.items():
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(world_by_key[key], SkyCoord):
if 'frame' in kwargs:
objects[key] = world_by_key[key].transform_to(kwargs['frame'])
else:
objects[key] = world_by_key[key]
else:
objects[key] = klass(world_by_key[key], *args, **kwargs)
else:
for ikey, key in enumerate(classes):
klass, args, kwargs = classes[key]
w = world_objects[ikey]
if not isinstance(w, klass):
raise ValueError("Expected the following order of world "
"arguments: {0}".format(', '.join([k.__name__ for (k, _, _) in classes.values()])))
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(w, SkyCoord):
if 'frame' in kwargs:
objects[key] = w.transform_to(kwargs['frame'])
else:
objects[key] = w
else:
objects[key] = klass(w, *args, **kwargs)
# We now extract the attributes needed for the world values
world = []
for key, _, attr in components:
world.append(rec_getattr(objects[key], attr))
# Finally we convert to pixel coordinates
pixel = self.low_level_wcs.world_to_pixel_values(*world)
return pixel
def pixel_to_world(self, *pixel_arrays):
# Compute the world coordinate values
world = self.low_level_wcs.pixel_to_world_values(*pixel_arrays)
        if self.low_level_wcs.world_n_dim == 1:
world = (world,)
# Cache the classes and components since this may be expensive
components = self.low_level_wcs.world_axis_object_components
classes = self.low_level_wcs.world_axis_object_classes
# Deserialize classes
if self.low_level_wcs.serialized_classes:
classes_new = {}
for key, value in classes.items():
classes_new[key] = deserialize_class(value, construct=False)
classes = classes_new
args = defaultdict(list)
kwargs = defaultdict(dict)
for i, (key, attr, _) in enumerate(components):
if isinstance(attr, str):
kwargs[key][attr] = world[i]
else:
while attr > len(args[key]) - 1:
args[key].append(None)
args[key][attr] = world[i]
result = []
for key in default_order(components):
klass, ar, kw = classes[key]
result.append(klass(*args[key], *ar, **kwargs[key], **kw))
if len(result) == 1:
return result[0]
else:
return result
def array_index_to_world(self, *index_arrays):
return self.pixel_to_world(*index_arrays[::-1])
def world_to_array_index(self, *world_objects):
        if self.low_level_wcs.pixel_n_dim == 1:
return np.round(self.world_to_pixel(*world_objects)).astype(int)
else:
return tuple(np.round(self.world_to_pixel(*world_objects)[::-1]).astype(int).tolist())
|
b3da6dcdc7fa4da23de87750bb2e0dc46a88f4f0cd0f3e1289172467777eec05 | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from .low_level_api import BaseLowLevelWCS
from .high_level_api import HighLevelWCSMixin
from .sliced_low_level_wcs import SlicedLowLevelWCS
__all__ = ['custom_ctype_to_ucd_mapping', 'SlicedFITSWCS', 'FITSWCSAPIMixin']
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
'RA': 'pos.eq.ra',
'DEC': 'pos.eq.dec',
'GLON': 'pos.galactic.lon',
'GLAT': 'pos.galactic.lat',
'ELON': 'pos.ecliptic.lon',
'ELAT': 'pos.ecliptic.lat',
'TLON': 'pos.bodyrc.lon',
'TLAT': 'pos.bodyrc.lat',
'HPLT': 'custom:pos.helioprojective.lat',
'HPLN': 'custom:pos.helioprojective.lon',
# Spectral coordinates (WCS paper 3)
'FREQ': 'em.freq', # Frequency
'ENER': 'em.energy', # Energy
'WAVN': 'em.wavenumber', # Wavenumber
'WAVE': 'em.wl', # Vacuum wavelength
'VRAD': 'spect.dopplerVeloc.radio', # Radio velocity
'VOPT': 'spect.dopplerVeloc.opt', # Optical velocity
'ZOPT': 'src.redshift', # Redshift
'AWAV': 'em.wl', # Air wavelength
'VELO': 'spect.dopplerVeloc', # Apparent radial velocity
    'BETA': 'custom:spect.dopplerVeloc.beta', # Beta factor (v/c)
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
'TIME': 'time',
'TAI': 'time',
'TT': 'time',
'TDT': 'time',
'ET': 'time',
'IAT': 'time',
'UT1': 'time',
'UTC': 'time',
'GMT': 'time',
'GPS': 'time',
'TCG': 'time',
'TCB': 'time',
'TDB': 'time',
'LOCAL': 'time'
# UT() is handled separately in world_axis_physical_types
}
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
    but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis[::-1])
@array_shape.setter
def array_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(self.naxis, len(value)))
self._naxis = list(value)[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(self.naxis, len(value)))
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the number of "
"pixel bounds {}.".format(self.naxis, len(value)))
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
for axis_type in self.axis_type_names:
if axis_type.startswith('UT('):
types.append('time')
else:
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if axis_type in custom_mapping:
types.append(custom_mapping[axis_type])
break
else:
types.append(CTYPE_TO_UCD1.get(axis_type, None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ''
elif isinstance(unit, u.Unit):
unit = unit.to_string(format='vounit')
else:
try:
unit = u.Unit(unit).to_string(format='vounit')
except u.UnitsError:
unit = ''
units.append(unit)
return units
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else world
def array_index_to_world_values(self, *indices):
world = self.all_pix2world(*indices[::-1], 0)
return world[0] if self.world_n_dim == 1 else world
def world_to_pixel_values(self, *world_arrays):
pixel = self.all_world2pix(*world_arrays, 0)
return pixel[0] if self.pixel_n_dim == 1 else pixel
def world_to_array_index_values(self, *world_arrays):
pixel_arrays = self.all_world2pix(*world_arrays, 0)[::-1]
        array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=int) for pixel in pixel_arrays)
return array_indices[0] if self.pixel_n_dim == 1 else array_indices
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, '_components_and_classes_cache', None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.wcs.utils import wcs_to_celestial_frame
from astropy.coordinates import SkyCoord
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
frame = wcs_to_celestial_frame(self)
kwargs = {}
kwargs['frame'] = frame
kwargs['unit'] = u.deg
classes['celestial'] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ('celestial', 0, 'spherical.lon.degree')
components[self.wcs.lat] = ('celestial', 1, 'spherical.lat.degree')
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
if 'time' in self.world_axis_physical_types:
warnings.warn('In future, times will be represented by the Time class '
'instead of Quantity', FutureWarning)
for i in range(self.naxis):
if components[i] is None:
name = self.axis_type_names[i].lower()
if name == '':
name = 'world'
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {'unit': self.wcs.cunit[i]})
components[i] = (name, 0, 'value')
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
|
b0c03e15267454c072e858c967b103ae8feaa37a44b98b1da2d2f406b8eb6b4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_filename
from astropy.time import Time
from astropy import units as u
from astropy.wcs.wcs import WCS, Sip, WCSSUB_LONGITUDE, WCSSUB_LATITUDE
from astropy.wcs.wcsapi.fitswcs import SlicedFITSWCS
from astropy.wcs.utils import (proj_plane_pixel_scales,
is_proj_plane_distorted,
non_celestial_pixel_scales,
wcs_to_celestial_frame,
celestial_frame_to_wcs, skycoord_to_pixel,
pixel_to_skycoord, custom_wcs_to_frame_mappings,
custom_frame_to_wcs_mappings,
add_stokes_axis_to_wcs)
def test_wcs_dropping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
def test_wcs_swapping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
@pytest.mark.parametrize('ndim', (2, 3))
def test_add_stokes(ndim):
wcs = WCS(naxis=ndim)
for ii in range(ndim + 1):
outwcs = add_stokes_axis_to_wcs(wcs, ii)
assert outwcs.wcs.naxis == ndim + 1
assert outwcs.wcs.ctype[ii] == 'STOKES'
assert outwcs.wcs.cname[ii] == 'STOKES'
def test_slice():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))
assert slice_wcs._naxis == [1000, 499]
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.wcs_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
assert slice_wcs._naxis == [250, 250]
slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))
assert slice_wcs._naxis == [500, 250]
# Non-integral values do not alter the naxis attribute
slice_wcs = mywcs.slice([slice(50.), slice(20.)])
assert slice_wcs._naxis == [1000, 500]
slice_wcs = mywcs.slice([slice(50.), slice(20)])
assert slice_wcs._naxis == [20, 500]
slice_wcs = mywcs.slice([slice(50), slice(20.5)])
assert slice_wcs._naxis == [1000, 50]
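# For reference (derivation, not part of the original test): under a slice
# with offset ``start`` and step ``step``, the reference pixel transforms as
#     crpix_new = (crpix_old - start - 0.5) / step + 0.5
# and cdelt is multiplied by ``step``. E.g. for the [1::2, 0::4] case above,
# x: (1 - 0 - 0.5) / 4 + 0.5 = 0.625 and y: (1 - 1 - 0.5) / 2 + 0.5 = 0.25,
# matching the asserted crpix of [0.625, 0.25].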
def test_slice_with_sip():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
mywcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[ -2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
mywcs.sip = Sip(a, b, None, None, mywcs.wcs.crpix)
mywcs.wcs.set()
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_slice_getitem():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
mywcs.wcs.crpix = [2, 2]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
# Default: numpy order
slice_wcs = mywcs[1::2]
assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))
def test_slice_fitsorder():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))
slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))
def test_slice_wcs():
mywcs = WCS(naxis=2)
sub = mywcs[0]
assert isinstance(sub, SlicedFITSWCS)
with pytest.raises(ValueError) as exc:
mywcs[0, ::2]
assert exc.value.args[0] == "Slicing WCS with a step is not supported."
def test_axis_names():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
def test_celestial():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES']
cel = mywcs.celestial
assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert cel.axis_type_names == ['RA', 'DEC']
def test_wcs_to_celestial_frame():
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates.builtin_frames import ICRS, ITRS, FK5, FK4, Galactic
mywcs = WCS(naxis=2)
mywcs.wcs.set()
with pytest.raises(ValueError) as exc:
assert wcs_to_celestial_frame(mywcs) is None
assert exc.value.args[0] == "Could not determine celestial frame corresponding to the specified WCS object"
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
mywcs.wcs.set()
with pytest.raises(ValueError):
assert wcs_to_celestial_frame(mywcs) is None
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.equinox = 1987.
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK5)
assert frame.equinox == Time(1987., format='jyear')
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.equinox = 1982
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK4)
assert frame.equinox == Time(1982., format='byear')
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['TLON-CAR', 'TLAT-CAR']
mywcs.wcs.dateobs = '2017-08-17T12:41:04.430'
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ITRS)
assert frame.obstime == Time('2017-08-17T12:41:04.430')
for equinox in [np.nan, 1987, 1982]:
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.radesys = 'ICRS'
mywcs.wcs.equinox = equinox
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# Flipped order
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# More than two dimensions
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['GLAT-CAR', 'VELOCITY', 'GLON-CAR']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
def test_wcs_to_celestial_frame_correlated():
# Regression test for a bug that caused wcs_to_celestial_frame to fail when
# the celestial axes were correlated with other axes.
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates.builtin_frames import ICRS
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
mywcs.wcs.cd = np.ones((3, 3))
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
def test_wcs_to_celestial_frame_extend():
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
mywcs.wcs.set()
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
class OffsetFrame:
pass
def identify_offset(wcs):
if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'):
return OffsetFrame()
with custom_wcs_to_frame_mappings(identify_offset):
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, OffsetFrame)
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
def test_celestial_frame_to_wcs():
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import ICRS, ITRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame
class FakeFrame(BaseCoordinateFrame):
pass
frame = FakeFrame()
with pytest.raises(ValueError) as exc:
celestial_frame_to_wcs(frame)
assert exc.value.args[0] == ("Could not determine WCS corresponding to "
"the specified coordinate frame.")
frame = ICRS()
mywcs = celestial_frame_to_wcs(frame)
mywcs.wcs.set()
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'ICRS'
assert np.isnan(mywcs.wcs.equinox)
assert mywcs.wcs.lonpole == 180
assert mywcs.wcs.latpole == 0
frame = FK5(equinox='J1987')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK5'
assert mywcs.wcs.equinox == 1987.
frame = FK4(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4'
assert mywcs.wcs.equinox == 1982.
frame = FK4NoETerms(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4-NO-E'
assert mywcs.wcs.equinox == 1982.
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
mywcs.wcs.crval = [100, -30]
mywcs.wcs.set()
assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))
frame = ITRS(obstime=Time('2017-08-17T12:41:04.43'))
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == '2017-08-17T12:41:04.430'
frame = ITRS()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == Time('J2000').utc.fits
def test_celestial_frame_to_wcs_extend():
class OffsetFrame:
pass
frame = OffsetFrame()
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def identify_offset(frame, projection=None):
if isinstance(frame, OffsetFrame):
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
return wcs
with custom_frame_to_wcs_mappings(identify_offset):
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET')
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def test_pixscale_nodrop():
mywcs = WCS(naxis=2)
mywcs.wcs.cdelt = [0.1, 0.2]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2]
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
def test_pixscale_withdrop():
mywcs = WCS(naxis=3)
mywcs.wcs.cdelt = [0.1, 0.2, 1]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT']
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2, 1]
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
def test_pixscale_cd():
mywcs = WCS(naxis=2)
mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_cd_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
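    # The CD matrix is a pure rotation scaled by `scale`, so each column has
    # norm 0.1 and the recovered pixel scales are independent of the angle.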
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_pc_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cdelt = [-scale, scale]
mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)],
[np.sin(rho), np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize(('cdelt', 'pc', 'pccd'),
(([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),
([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),
([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3]))))
def test_pixel_scale_matrix(cdelt, pc, pccd):
mywcs = WCS(naxis=(len(cdelt)))
mywcs.wcs.cdelt = cdelt
mywcs.wcs.pc = pc
assert_almost_equal(mywcs.pixel_scale_matrix, pccd)
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], False),
(['RA---TAN', 'FREQ'], False),))
def test_is_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.is_celestial == cel
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], True),
(['RA---TAN', 'FREQ'], False),))
def test_has_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.has_celestial == cel
def test_has_celestial_correlated():
# Regression test for astropy/astropy#8416 - has_celestial failed when
# celestial axes were correlated with other axes.
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'
mywcs.wcs.cd = np.ones((3, 3))
mywcs.wcs.set()
assert mywcs.has_celestial
@pytest.mark.parametrize(('cdelt', 'pc', 'cd'),
((np.array([0.1, 0.2]), np.eye(2), np.eye(2)),
(np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2)),
(np.array([0.1, 0.2]), np.eye(2), None),
(np.array([0.1, 0.2]), None, np.eye(2)),
))
def test_noncelestial_scale(cdelt, pc, cd):
mywcs = WCS(naxis=2)
if cd is not None:
mywcs.wcs.cd = cd
if pc is not None:
mywcs.wcs.pc = pc
mywcs.wcs.cdelt = cdelt
mywcs.wcs.ctype = ['RA---TAN', 'FREQ']
ps = non_celestial_pixel_scales(mywcs)
assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel(mode):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord
header = get_pkg_data_contents('data/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# Make sure you can specify a different class using ``cls`` keyword
class SkyCoord2(SkyCoord):
pass
new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode,
cls=SkyCoord2).transform_to('icrs')
assert new2.__class__ is SkyCoord2
assert_allclose(new2.ra.degree, ref.ra.degree)
assert_allclose(new2.dec.degree, ref.dec.degree)
def test_skycoord_to_pixel_swapped():
# Regression test for a bug that caused skycoord_to_pixel and
# pixel_to_skycoord to not work correctly if the axes were swapped in the
# WCS.
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord
header = get_pkg_data_contents('data/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
wcs_swapped = wcs.sub([WCSSUB_LATITUDE, WCSSUB_LONGITUDE])
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp1, yp1 = skycoord_to_pixel(ref, wcs)
xp2, yp2 = skycoord_to_pixel(ref, wcs_swapped)
assert_allclose(xp1, xp2)
assert_allclose(yp1, yp2)
# WCS is in FK5 so we need to transform back to ICRS
new1 = pixel_to_skycoord(xp1, yp1, wcs).transform_to('icrs')
new2 = pixel_to_skycoord(xp1, yp1, wcs_swapped).transform_to('icrs')
assert_allclose(new1.ra.degree, new2.ra.degree)
assert_allclose(new1.dec.degree, new2.dec.degree)
def test_is_proj_plane_distorted():
# non-orthogonal CD:
wcs = WCS(naxis=2)
wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert(is_proj_plane_distorted(wcs))
# almost orthogonal CD:
wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]
assert(not is_proj_plane_distorted(wcs))
# real case:
header = get_pkg_data_filename('data/sip.fits')
wcs = WCS(header)
assert(is_proj_plane_distorted(wcs))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel_distortions(mode):
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord
header = get_pkg_data_filename('data/sip.fits')
wcs = WCS(header)
ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)

# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
from astropy.utils.data import get_pkg_data_filenames, get_pkg_data_contents
from astropy.utils.misc import NumpyRNGContext
from astropy import wcs
# hdr_map_file_list = list(get_pkg_data_filenames("maps", pattern="*.hdr"))
# Use the base name of each file, because the file name shows up in the
# parametrized test name (e.g. in the pandokia report).
hdr_map_file_list = [os.path.basename(fname) for fname in get_pkg_data_filenames("data/maps", pattern="*.hdr")]
# Check the number of files before reading them in: if N == 0 we would not
# have performed any tests at all, and if N < n_map_files some files are
# missing and their tests would be skipped. Without this check, both cases
# happen silently!
def test_read_map_files():
# how many map files we expect to see
n_map_files = 28
    assert len(hdr_map_file_list) == n_map_files, (
        "test_read_map_files has wrong number of data files: found {}, "
        "expected {}".format(len(hdr_map_file_list), n_map_files))
@pytest.mark.parametrize("filename", hdr_map_file_list)
def test_map(filename):
header = get_pkg_data_contents(os.path.join("data/maps", filename))
wcsobj = wcs.WCS(header)
with NumpyRNGContext(123456789):
x = np.random.rand(2 ** 12, wcsobj.wcs.naxis)
world = wcsobj.wcs_pix2world(x, 1)
pix = wcsobj.wcs_world2pix(x, 1)
hdr_spec_file_list = [os.path.basename(fname) for fname in get_pkg_data_filenames("data/spectra", pattern="*.hdr")]
def test_read_spec_files():
    # how many spec files we expect to see
    n_spec_files = 6
    assert len(hdr_spec_file_list) == n_spec_files, (
        "test_read_spec_files has wrong number of data files: found {}, "
        "expected {}".format(len(hdr_spec_file_list), n_spec_files))
    # N.B. if this assertion fails, py.test reports one more test than it
    # would have otherwise.
@pytest.mark.parametrize("filename", hdr_spec_file_list)
def test_spectrum(filename):
header = get_pkg_data_contents(os.path.join("data", "spectra", filename))
wcsobj = wcs.WCS(header)
with NumpyRNGContext(123456789):
x = np.random.rand(2 ** 16, wcsobj.wcs.naxis)
world = wcsobj.wcs_pix2world(x, 1)
pix = wcsobj.wcs_world2pix(x, 1)

# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import warnings
from datetime import datetime
import pytest
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal)
from astropy.tests.helper import raises, catch_warnings
from astropy import wcs
from astropy.wcs import _wcs
from astropy.utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from astropy.utils.misc import NumpyRNGContext
from astropy.utils.exceptions import AstropyUserWarning
from astropy.io import fits
from astropy.coordinates import SkyCoord
class TestMaps:
def setup(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames("data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
"test_spectra has wrong number data files: found {}, expected "
" {}".format(len(self._file_list), n_data_files))
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding='binary')
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup(self):
self._file_list = list(get_pkg_data_filenames("data/spectra",
pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
assert len(self._file_list) == n_data_files, (
"test_spectra has wrong number data files: found {}, expected "
" {}".format(len(self._file_list), n_data_files))
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "spectra", filename), encoding='binary')
# finally run the test.
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
def run():
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
try:
w = wcs.WCS(header, translate_units='dhs')
except wcs.InvalidTransformError:
pass
else:
assert False, "Expected InvalidTransformError"
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 2
for item in w:
if 'unitfix' in str(item.message):
assert 'Hz' in str(item.message)
assert 'M/S' in str(item.message)
assert 'm/s' in str(item.message)
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents(
'data/outside_sky.hdr', encoding='binary')
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
# TODO: write this to test the expected output behavior of pix2world,
# currently this just makes sure it doesn't error out in unexpected ways
filename = get_pkg_data_filename('data/sip2.fits')
with catch_warnings(wcs.wcs.FITSFixedWarning) as caught_warnings:
# this raises a warning unimportant for this testing the pix2world
# FITSFixedWarning(u'The WCS transformation has more axes (2) than the
# image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
close_enough = 1e-8
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018],
[0.00023043, -0.00024997]])
assert np.all(np.abs(ww.wcs.pc - answer) < close_enough)
answer = np.array([[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272]])
assert np.all(np.abs(result - answer) < close_enough)
def test_load_fits_path():
fits_name = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41., 2., 1)
assert_array_almost_equal_nulp(xp, 41., 10)
assert_array_almost_equal_nulp(yp, 2., 10)
# Valid WCS
w = wcs.WCS({'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRPIX1': 1,
'CRPIX2': 1,
'CRVAL1': 40.,
'CRVAL2': 0.,
'CDELT1': -0.1,
'CDELT2': 0.1})
xp, yp = w.wcs_world2pix(41., 2., 0)
assert_array_almost_equal_nulp(xp, -10., 10)
assert_array_almost_equal_nulp(yp, 20., 10)
@raises(TypeError)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(
data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
with pytest.raises(ValueError) as exc:
xw, yw = w.wcs_pix2world(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError) as exc:
xp, yp = w.wcs_world2pix(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
xw, = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
# Issue #1395
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError) as exc:
xy2 = w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
xy = np.random.random((2, 1))
with pytest.raises(ValueError) as exc:
xy2 = w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
def test_warning_about_defunct_keywords():
def run():
header = get_pkg_data_contents(
'data/defunct_keywords.hdr', encoding='binary')
w = wcs.WCS(header)
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 4
for item in w:
assert 'PCi_ja' in str(item.message)
# Make sure the warnings come out every time...
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 4
for item in w:
assert 'PCi_ja' in str(item.message)
def test_warning_about_defunct_keywords_exception():
def run():
header = get_pkg_data_contents(
'data/defunct_keywords.hdr', encoding='binary')
w = wcs.WCS(header)
with pytest.raises(wcs.FITSFixedWarning):
warnings.simplefilter("error", wcs.FITSFixedWarning)
run()
# Restore warnings filter to previous state
warnings.simplefilter("default")
def test_to_header_string():
header_string = """
    WCSAXES =                    2 / Number of coordinate axes
    CRPIX1  =                  0.0 / Pixel coordinate of reference point
    CRPIX2  =                  0.0 / Pixel coordinate of reference point
    CDELT1  =                  1.0 / Coordinate increment at reference point
    CDELT2  =                  1.0 / Coordinate increment at reference point
    CRVAL1  =                  0.0 / Coordinate value at reference point
    CRVAL2  =                  0.0 / Coordinate value at reference point
    LATPOLE =                 90.0 / [deg] Native latitude of celestial pole
    END"""
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if 'COMMENT' in h0:
del h0['COMMENT']
if '' in h0:
del h0['']
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-8:]
def test_to_header_warning():
fits_name = get_pkg_data_filename('data/sip.fits')
x = wcs.WCS(fits_name)
with catch_warnings() as w:
x.to_header()
assert len(w) == 1
assert 'A_ORDER' in str(w[0])
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
assert 'COMMENT' not in header
wkey = 'P'
header = w.to_header(key=wkey)
assert wkey not in header
assert 'COMMENT' not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
@raises(wcs.InvalidTransformError)
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
wcses = wcs.find_all_wcs(header, fix=False)
def test_validate():
with catch_warnings():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = repr(results)
version = wcs._wcs.__version__
if version[0] == '6':
filename = 'data/validate.6.txt'
elif version[0] == '5':
if version >= '5.13':
filename = 'data/validate.5.13.txt'
else:
filename = 'data/validate.5.0.txt'
else:
filename = 'data/validate.txt'
with open(get_pkg_data_filename(filename), "r") as fd:
lines = fd.readlines()
assert set([x.strip() for x in lines]) == set([
x.strip() for x in results_txt.splitlines()])
def test_validate_with_2_wcses():
# From Issue #2053
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
         [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
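    # The absolute tolerance below is scaled by the pixel scale, so the
    # mapping must hold to roughly a millionth of a pixel on the sky.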
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
def test_all_world2pix(fname=None, ext=0,
tolerance=1.0e-4, origin=0,
random_npts=25000,
adaptive=False, maxiter=20,
detect_divergence=True):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename('data/j94f05bgq_flt.fits')
ext = ('SCI', 1)
if not os.path.isfile(fname):
raise OSError("Input file '{:s}' to 'test_all_world2pix' not found."
.format(fname))
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7. / 16 * crpix).astype(int))
naxesi_u = list((9. / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack([i.flatten() for i in
np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0]
    # Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
mwidth = 2 * (crpix * 1. / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world, origin, tolerance=tolerance, adaptive=adaptive,
maxiter=maxiter, detect_divergence=detect_divergence)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print("There are {} diverging solutions.".format(ndiv))
print("Indices of diverging solutions:\n{}"
.format(e.divergent))
print("Diverging solutions:\n{}\n"
.format(e.best_solution[e.divergent]))
print("Mean radius of the diverging solutions: {}"
.format(np.mean(
np.linalg.norm(e.best_solution[e.divergent], axis=1))))
print("Mean accuracy of the diverging solutions: {}\n"
.format(np.mean(
np.linalg.norm(e.accuracy[e.divergent], axis=1))))
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print("There are {} slowly converging solutions."
.format(nslow))
print("Indices of slowly converging solutions:\n{}"
.format(e.slow_conv))
print("Slowly converging solutions:\n{}\n"
.format(e.best_solution[e.slow_conv]))
else:
print("There are no slowly converging solutions.\n")
print("There are {} converged solutions."
.format(e.best_solution.shape[0] - ndiv - nslow))
print("Best solutions (all points):\n{}"
.format(e.best_solution))
print("Accuracy:\n{}\n".format(e.accuracy))
print("\nFinished running 'test_all_world2pix' with errors.\n"
"ERROR: {}\nRun time: {}\n"
.format(e.args[0], runtime_end - runtime_begin))
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print("\nFinished running 'test_all_world2pix'.\n"
"Mean error = {0:e} (Max error = {1:e})\n"
"Run time: {2}\n"
.format(meanerr, maxerr, runtime_end - runtime_begin))
assert(maxerr < 2.0 * tolerance)
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents('data/validate.fits', encoding='binary')
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
w = wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents(
'data/unit.hdr', encoding='binary')
w = wcs.WCS(header)
assert w.wcs.cunit[2] == 'm/s'
def test_footprint_to_file(tmpdir):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
hdr = {'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg',
'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02,
'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg',
'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01,
'PV2_1': 1., 'PV2_3': 220., 'NAXIS1': 2048, 'NAXIS2': 1024}
w = wcs.WCS(hdr)
testfile = str(tmpdir.join('test.txt'))
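    # footprint_to_file writes a DS9-style region file: two header lines,
    # the coordinate system name, then a polygon line carrying display
    # attributes such as the color.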
w.footprint_to_file(testfile)
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'ICRS\n'
assert 'color=green' in lines[3]
w.footprint_to_file(testfile, coordsys='FK5', color='red')
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'FK5\n'
assert 'color=red' in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys='FOO')
del hdr['NAXIS1']
del hdr['NAXIS2']
w = wcs.WCS(hdr)
with pytest.warns(AstropyUserWarning):
w.footprint_to_file(testfile)
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h['RADESYSA'] = 'ICRS'
h['PV2_1'] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception:
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents(
'data/invalid_header.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
# Both lines are in here, because 0.4 calls .set within WCS.__init__,
# whereas 0.3 and earlier did not.
w = wcs.WCS(header, _do_set=False)
c = w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary')
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
fits = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142]])
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
""" Test calc_footprint without distortion. """
fits = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948]])
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
""" Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5],
[0.1, 0.5],
[359.9, 0.5],
[359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
w = wcs.WCS(header)
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_printwcs():
"""
Just make sure that it runs
"""
h = get_pkg_data_contents('data/spectra/orion-freq-1.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
# Regression test for #3066
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'WCS' object is not iterable"
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'NewWCS' object is not iterable"
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding='binary')
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
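    # The SIP and TPV headers are expected to encode the same distortion,
    # so the two WCSes should agree at the reference pixel.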
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1))
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1))
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
hdulist = fits.open(path)
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
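    # These self-assignments exercise the property getters and setters of
    # the distortion attributes, again mainly for coverage.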
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048., 1024.])
wcs.WCS(hdulist[1].header, hdulist)
hdulist.close()
def test_list_naxis():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
hdulist = fits.open(path)
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist, naxis=['celestial'])
assert w.naxis == 2
assert w.wcs.naxis == 2
path = get_pkg_data_filename("data/maps/1904-66_SIN.hdr")
with open(path, 'rb') as fd:
content = fd.read()
w = wcs.WCS(content, naxis=['celestial'])
assert w.naxis == 2
assert w.wcs.naxis == 2
w = wcs.WCS(content, naxis=['spectral'])
assert w.naxis == 0
assert w.wcs.naxis == 0
hdulist.close()
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
w = wcs.WCS(hdr)
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header['CRVAL{0}'.format(ii + 1)] == w.wcs.crval[ii]
assert header['CDELT{0}'.format(ii + 1)] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
w.wcs.cunit = ['deg', 'deg', 'Hz']
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header['CRVAL{0}'.format(ii + 1)] == w.wcs.crval[ii]
assert header['CDELT{0}'.format(ii + 1)] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.set()
header = w.to_header()
assert header['CRVAL1'] != w.wcs.crval[0]
assert header['CRVAL2'] != w.wcs.crval[1]
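    # The WCSHDO_P17 relax flag makes wcslib write floating-point keyvalues
    # with 17 significant digits, enough to round-trip a double exactly.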
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header['CRVAL1'] == w.wcs.crval[0]
assert header['CRVAL2'] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing ImageHDU or PrimaryHDU and comparing it with
wcs initialized from header. For #4493.
"""
path = get_pkg_data_filename('data/validate.fits')
hdulist = fits.open(path)
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
hdulist.close()
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
w = wcs.WCS(hdr)
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert('A_0_2' not in newhdr)
# CTYPE should not include "-SIP" if relax is False
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(key="C")
assert('A_0_2' not in newhdr)
# Test writing header with a different key
wnew = wcs.WCS(newhdr, key='C')
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
assert('A_0_2' in newhdr)
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
w = wcs.WCS(hdr)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
newhdr = w.to_header(relax=True)
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
w.wcs.bounds_check(False, False)
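    # Disabling bounds checking lets pixels outside the valid projection
    # region be transformed anyway instead of being returned as NaN.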
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
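    # pixel_shape follows FITS axis order (x, y); array_shape carries the
    # same information reversed into numpy (row, column) order.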
assert w.pixel_shape == (1000, 500)
assert w.array_shape == (500, 1000)
w.pixel_shape = (99, 59)
assert w._naxis == [99, 59]
w.array_shape = (45, 23)
assert w._naxis == [23, 45]
assert w.pixel_shape == (23, 45)
w.pixel_shape = None
assert w.pixel_bounds is None
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, CTYPE with
that key is looked at and not the primary CTYPE.
fix for #5443.
"""
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key='A')
h2 = w.to_header(relax=False)
h1['CTYPE1A'] = "RA---SIN-SIP"
h1['CTYPE2A'] = "DEC--SIN-SIP"
h1.update(h2)
w = wcs.WCS(h1, key='A')
assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename('data/dist.fits')
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test sip reading with extra key.
"""
hdr_name = get_pkg_data_filename('data/sip-broken.hdr')
header = fits.Header.fromfile(hdr_name)
    del header["CRPIX1"]
    del header["CRPIX2"]
w = wcs.WCS(header=header, key="A")
    assert isinstance(w.sip, wcs.Sip)
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
def test_zero_size_input():
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
w = wcs.WCS(f[0].header)
inp = np.zeros((0, 2))
assert_array_equal(inp, w.all_pix2world(inp, 0))
assert_array_equal(inp, w.all_world2pix(inp, 0))
inp = [], [1]
result = w.all_pix2world([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
result = w.all_world2pix([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
def test_scalar_inputs():
"""
Issue #7845
"""
wcsobj = wcs.WCS(naxis=1)
result = wcsobj.all_pix2world(2, 1)
assert_array_equal(result, [np.array(2.)])
assert result[0].shape == ()
result = wcsobj.all_pix2world([2], 1)
assert_array_equal(result, [np.array([2.])])
assert result[0].shape == (1,)
def test_footprint_contains():
"""
Test WCS.footprint_contains(skycoord)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
    header = fits.Header.fromstring(header.strip(), '\n')
    test_wcs = wcs.WCS(header)
    hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit='deg'))
    assert hasCoord
    hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit='deg'))
    assert not hasCoord
    hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit='deg'))
    assert not hasCoord
def test_cunit():
# Initializing WCS
w1 = wcs.WCS(naxis=2)
w2 = wcs.WCS(naxis=2)
w3 = wcs.WCS(naxis=2)
# Initializing the values of cunit
w1.wcs.cunit = ['deg', 'm/s']
w2.wcs.cunit = ['km/h', 'km/h']
w3.wcs.cunit = ['deg', 'm/s']
# Equality checking a cunit with itself
assert w1.wcs.cunit == w1.wcs.cunit
# Equality checking of two different cunit object having same values
assert w1.wcs.cunit == w3.wcs.cunit
# Inequality checking of two different cunit object having different values
assert not w1.wcs.cunit == w2.wcs.cunit
# Inequality checking of cunit with a list of literals
assert not w1.wcs.cunit == [1, 2, 3]
# Inequality checking with some characters
assert w1.wcs.cunit != ['a', 'b', 'c']
    # Ordered comparison is not implemented, so a TypeError is raised
with pytest.raises(TypeError):
w1.wcs.cunit < w2.wcs.cunit
class TestWcsWithTime:
def setup(self):
fname = get_pkg_data_filename(
'data/header_with_time.fits')
self.header = fits.Header.fromfile(fname)
self.w = wcs.WCS(self.header, key='A')
    def test_keywords2wcsprm(self):
""" Make sure Wcsprm is populated correctly from the header."""
ctype = [self.header[val] for val in self.header["CTYPE*"]]
crval = [self.header[val] for val in self.header["CRVAL*"]]
crpix = [self.header[val] for val in self.header["CRPIX*"]]
cdelt = [self.header[val] for val in self.header["CDELT*"]]
cunit = [self.header[val] for val in self.header["CUNIT*"]]
assert list(self.w.wcs.ctype) == ctype
assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, 0]
assert_allclose(self.w.wcs.crval, crval)
assert_allclose(self.w.wcs.crpix, crpix)
assert_allclose(self.w.wcs.cdelt, cdelt)
assert list(self.w.wcs.cunit) == cunit
naxis = self.w.naxis
assert naxis == 4
pc = np.zeros((naxis, naxis), dtype=np.float64)
for i in range(1, 5):
for j in range(1, 5):
if i == j:
pc[i-1, j-1] = self.header.get('PC{}_{}A'.format(i, j), 1)
else:
pc[i-1, j-1] = self.header.get('PC{}_{}A'.format(i, j), 0)
assert_allclose(self.w.wcs.pc, pc)
char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',
'dateref', 'dateobs', 'datebeg', 'dateavg', 'dateend']
for key in char_keys:
assert getattr(self.w.wcs, key) == self.header.get(key, "")
num_keys = ['mjdref', 'mjdobs', 'mjdbeg', 'mjdend',
'jepoch', 'bepoch', 'tstart', 'tstop', 'xposure',
'timsyer', 'timrder', 'timedel', 'timepixr',
'timeoffs', 'telapse', 'czphs', 'cperi']
for key in num_keys:
assert_allclose(getattr(self.w.wcs, key), self.header.get(key, np.nan))
def test_transforms(self):
assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1), self.w.wcs.crval)

# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj
from astropy.utils.misc import NumpyRNGContext
from astropy.io import fits
from astropy import wcs
def test_basic():
wcs1 = wcs.WCS()
s = pickle.dumps(wcs1)
wcs2 = pickle.loads(s)
def test_dist():
with get_pkg_data_fileobj(
os.path.join("data", "dist.fits"), encoding='binary') as test_file:
hdulist = fits.open(test_file)
wcs1 = wcs.WCS(hdulist[0].header, hdulist)
assert wcs1.det2im2 is not None
s = pickle.dumps(wcs1)
wcs2 = pickle.loads(s)
with NumpyRNGContext(123456789):
x = np.random.rand(2 ** 16, wcs1.wcs.naxis)
world1 = wcs1.all_pix2world(x, 1)
world2 = wcs2.all_pix2world(x, 1)
assert_array_almost_equal(world1, world2)
def test_sip():
with get_pkg_data_fileobj(
os.path.join("data", "sip.fits"), encoding='binary') as test_file:
hdulist = fits.open(test_file, ignore_missing_end=True)
wcs1 = wcs.WCS(hdulist[0].header)
assert wcs1.sip is not None
s = pickle.dumps(wcs1)
wcs2 = pickle.loads(s)
with NumpyRNGContext(123456789):
x = np.random.rand(2 ** 16, wcs1.wcs.naxis)
world1 = wcs1.all_pix2world(x, 1)
world2 = wcs2.all_pix2world(x, 1)
assert_array_almost_equal(world1, world2)
def test_sip2():
with get_pkg_data_fileobj(
os.path.join("data", "sip2.fits"), encoding='binary') as test_file:
hdulist = fits.open(test_file, ignore_missing_end=True)
wcs1 = wcs.WCS(hdulist[0].header)
assert wcs1.sip is not None
s = pickle.dumps(wcs1)
wcs2 = pickle.loads(s)
with NumpyRNGContext(123456789):
x = np.random.rand(2 ** 16, wcs1.wcs.naxis)
world1 = wcs1.all_pix2world(x, 1)
world2 = wcs2.all_pix2world(x, 1)
assert_array_almost_equal(world1, world2)
def test_wcs():
header = get_pkg_data_contents(
os.path.join("data", "outside_sky.hdr"), encoding='binary')
wcs1 = wcs.WCS(header)
s = pickle.dumps(wcs1)
wcs2 = pickle.loads(s)
with NumpyRNGContext(123456789):
x = np.random.rand(2 ** 16, wcs1.wcs.naxis)
world1 = wcs1.all_pix2world(x, 1)
world2 = wcs2.all_pix2world(x, 1)
assert_array_almost_equal(world1, world2)
class Sub(wcs.WCS):
    def __init__(self, *args, **kwargs):
        self.foo = 42
        super().__init__(*args, **kwargs)
def test_subclass():
wcs = Sub()
s = pickle.dumps(wcs)
wcs2 = pickle.loads(s)
assert isinstance(wcs2, Sub)
assert wcs.foo == 42
assert wcs2.foo == 42
assert wcs2.wcs is not None

# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import locale
import re
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
from astropy.tests.helper import raises, catch_warnings
from astropy.io import fits
from astropy.wcs import wcs
from astropy.wcs import _wcs
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj, get_pkg_data_filename
from astropy import units as u
######################################################################
def test_alt():
w = _wcs.Wcsprm()
assert w.alt == " "
w.alt = "X"
assert w.alt == "X"
del w.alt
assert w.alt == " "
@raises(ValueError)
def test_alt_invalid1():
w = _wcs.Wcsprm()
w.alt = "$"
@raises(ValueError)
def test_alt_invalid2():
w = _wcs.Wcsprm()
w.alt = " "
def test_axis_types():
w = _wcs.Wcsprm()
assert_array_equal(w.axis_types, [0, 0])
def test_cd():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.cd.dtype == float
assert w.has_cd() is True
assert_array_equal(w.cd, [[1, 0], [0, 1]])
del w.cd
assert w.has_cd() is False
@raises(AttributeError)
def test_cd_missing():
w = _wcs.Wcsprm()
assert w.has_cd() is False
w.cd
@raises(AttributeError)
def test_cd_missing2():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.has_cd() is True
del w.cd
assert w.has_cd() is False
w.cd
@raises(ValueError)
def test_cd_invalid():
w = _wcs.Wcsprm()
w.cd = [1, 0, 0, 1]
def test_cdfix():
w = _wcs.Wcsprm()
w.cdfix()
def test_cdelt():
w = _wcs.Wcsprm()
assert_array_equal(w.cdelt, [1, 1])
w.cdelt = [42, 54]
assert_array_equal(w.cdelt, [42, 54])
@raises(TypeError)
def test_cdelt_delete():
w = _wcs.Wcsprm()
del w.cdelt
def test_cel_offset():
w = _wcs.Wcsprm()
assert w.cel_offset is False
w.cel_offset = 'foo'
assert w.cel_offset is True
w.cel_offset = 0
assert w.cel_offset is False
def test_celfix():
# TODO: We need some data with -NCP or -GLS projections to test
# with. For now, this is just a smoke test
w = _wcs.Wcsprm()
assert w.celfix() == -1
def test_cname():
w = _wcs.Wcsprm()
# Test that this works as an iterator
for x in w.cname:
assert x == ''
assert list(w.cname) == ['', '']
w.cname = [b'foo', 'bar']
assert list(w.cname) == ['foo', 'bar']
@raises(TypeError)
def test_cname_invalid():
w = _wcs.Wcsprm()
w.cname = [42, 54]
def test_colax():
w = _wcs.Wcsprm()
assert w.colax.dtype == np.intc
assert_array_equal(w.colax, [0, 0])
w.colax = [42, 54]
assert_array_equal(w.colax, [42, 54])
w.colax[0] = 0
assert_array_equal(w.colax, [0, 54])
with pytest.raises(ValueError):
w.colax = [1, 2, 3]
def test_colnum():
w = _wcs.Wcsprm()
assert w.colnum == 0
w.colnum = 42
assert w.colnum == 42
with pytest.raises(OverflowError):
w.colnum = 0xffffffffffffffffffff
with pytest.raises(OverflowError):
w.colnum = 0xffffffff
with pytest.raises(TypeError):
del w.colnum
@raises(TypeError)
def test_colnum_invalid():
w = _wcs.Wcsprm()
w.colnum = 'foo'
def test_crder():
w = _wcs.Wcsprm()
assert w.crder.dtype == float
assert np.all(np.isnan(w.crder))
w.crder[0] = 0
assert np.isnan(w.crder[1])
assert w.crder[0] == 0
w.crder = w.crder
def test_crota():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.crota.dtype == float
assert w.has_crota() is True
assert_array_equal(w.crota, [1, 0])
del w.crota
assert w.has_crota() is False
@raises(AttributeError)
def test_crota_missing():
w = _wcs.Wcsprm()
assert w.has_crota() is False
w.crota
@raises(AttributeError)
def test_crota_missing2():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.has_crota() is True
del w.crota
assert w.has_crota() is False
w.crota
def test_crpix():
w = _wcs.Wcsprm()
assert w.crpix.dtype == float
assert_array_equal(w.crpix, [0, 0])
w.crpix = [42, 54]
assert_array_equal(w.crpix, [42, 54])
w.crpix[0] = 0
assert_array_equal(w.crpix, [0, 54])
with pytest.raises(ValueError):
w.crpix = [1, 2, 3]
def test_crval():
w = _wcs.Wcsprm()
assert w.crval.dtype == float
assert_array_equal(w.crval, [0, 0])
w.crval = [42, 54]
assert_array_equal(w.crval, [42, 54])
w.crval[0] = 0
assert_array_equal(w.crval, [0, 54])
def test_csyer():
w = _wcs.Wcsprm()
assert w.csyer.dtype == float
assert np.all(np.isnan(w.csyer))
w.csyer[0] = 0
assert np.isnan(w.csyer[1])
assert w.csyer[0] == 0
w.csyer = w.csyer
def test_ctype():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
w.ctype = [b'RA---TAN', 'DEC--TAN']
assert_array_equal(w.axis_types, [2200, 2201])
assert w.lat == 1
assert w.lng == 0
assert w.lattyp == 'DEC'
assert w.lngtyp == 'RA'
assert list(w.ctype) == ['RA---TAN', 'DEC--TAN']
w.ctype = ['foo', 'bar']
assert_array_equal(w.axis_types, [0, 0])
assert list(w.ctype) == ['foo', 'bar']
assert w.lat == -1
assert w.lng == -1
assert w.lattyp == 'DEC'
assert w.lngtyp == 'RA'
def test_ctype_repr():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
w.ctype = [b'RA-\t--TAN', 'DEC-\n-TAN']
    assert repr(w.ctype) == "['RA-\\t--TAN', 'DEC-\\n-TAN']"
def test_ctype_index_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
with pytest.raises(IndexError):
w.ctype[2] = 'FOO'
def test_ctype_invalid_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
with pytest.raises(ValueError):
w.ctype[0] = 'X' * 100
with pytest.raises(TypeError):
w.ctype[0] = True
with pytest.raises(TypeError):
w.ctype = ['a', 0]
with pytest.raises(TypeError):
w.ctype = None
with pytest.raises(ValueError):
w.ctype = ['a', 'b', 'c']
with pytest.raises(ValueError):
w.ctype = ['FOO', 'A' * 100]
def test_cubeface():
w = _wcs.Wcsprm()
assert w.cubeface == -1
w.cubeface = 0
with pytest.raises(OverflowError):
w.cubeface = -1
def test_cunit():
w = _wcs.Wcsprm()
assert list(w.cunit) == [u.Unit(''), u.Unit('')]
w.cunit = [u.m, 'km']
assert w.cunit[0] == u.m
assert w.cunit[1] == u.km
def test_cunit_invalid():
w = _wcs.Wcsprm()
with catch_warnings() as warns:
w.cunit[0] = 'foo'
assert len(warns) == 1
assert 'foo' in str(warns[0].message)
def test_cunit_invalid2():
w = _wcs.Wcsprm()
with catch_warnings() as warns:
w.cunit = ['foo', 'bar']
assert len(warns) == 2
assert 'foo' in str(warns[0].message)
assert 'bar' in str(warns[1].message)
def test_unit():
w = wcs.WCS()
w.wcs.cunit[0] = u.erg
assert w.wcs.cunit[0] == u.erg
assert repr(w.wcs.cunit) == "['erg', '']"
def test_unit2():
w = wcs.WCS()
myunit = u.Unit("FOOBAR", parse_strict="warn")
w.wcs.cunit[0] = myunit
def test_unit3():
w = wcs.WCS()
with pytest.raises(IndexError):
w.wcs.cunit[2] = u.m
with pytest.raises(ValueError):
w.wcs.cunit = [u.m, u.m, u.m]
def test_unitfix():
w = _wcs.Wcsprm()
w.unitfix()
def test_cylfix():
# TODO: We need some data with broken cylindrical projections to
# test with. For now, this is just a smoke test.
w = _wcs.Wcsprm()
assert w.cylfix() == -1
assert w.cylfix([0, 1]) == -1
with pytest.raises(ValueError):
w.cylfix([0, 1, 2])
def test_dateavg():
w = _wcs.Wcsprm()
assert w.dateavg == ''
# TODO: When dateavg is verified, check that it works
def test_dateobs():
w = _wcs.Wcsprm()
assert w.dateobs == ''
    # TODO: When dateobs is verified, check that it works
def test_datfix():
w = _wcs.Wcsprm()
w.dateobs = '31/12/99'
assert w.datfix() == 0
assert w.dateobs == '1999-12-31'
assert w.mjdobs == 51543.0
def test_equinox():
w = _wcs.Wcsprm()
assert np.isnan(w.equinox)
w.equinox = 0
assert w.equinox == 0
del w.equinox
assert np.isnan(w.equinox)
with pytest.raises(TypeError):
w.equinox = None
def test_fix():
w = _wcs.Wcsprm()
    fix_ref = {
        'cdfix': 'No change',
        'cylfix': 'No change',
        'obsfix': 'No change',
        'datfix': 'No change',
        'spcfix': 'No change',
        'unitfix': 'No change',
        'celfix': 'No change'}
version = wcs._wcs.__version__
    if int(version.split('.')[0]) <= 5:
del fix_ref['obsfix']
assert w.fix() == fix_ref
def test_fix2():
w = _wcs.Wcsprm()
w.dateobs = '31/12/99'
fix_ref = {
'cdfix': 'No change',
'cylfix': 'No change',
'obsfix': 'No change',
'datfix': "Set MJD-OBS to 51543.000000 from DATE-OBS.\nChanged DATE-OBS from '31/12/99' to '1999-12-31'",
'spcfix': 'No change',
'unitfix': 'No change',
'celfix': 'No change'}
version = wcs._wcs.__version__
    if int(version.split('.')[0]) <= 5:
del fix_ref['obsfix']
fix_ref['datfix'] = "Changed '31/12/99' to '1999-12-31'"
assert w.fix() == fix_ref
assert w.dateobs == '1999-12-31'
assert w.mjdobs == 51543.0
def test_fix3():
w = _wcs.Wcsprm()
w.dateobs = '31/12/F9'
fix_ref = {
'cdfix': 'No change',
'cylfix': 'No change',
'obsfix': 'No change',
'datfix': "Invalid DATE-OBS format '31/12/F9'",
'spcfix': 'No change',
'unitfix': 'No change',
'celfix': 'No change'}
version = wcs._wcs.__version__
    if int(version.split('.')[0]) <= 5:
del fix_ref['obsfix']
fix_ref['datfix'] = "Invalid parameter value: invalid date '31/12/F9'"
assert w.fix() == fix_ref
assert w.dateobs == '31/12/F9'
assert np.isnan(w.mjdobs)
def test_fix4():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix('X')
def test_fix5():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix(naxis=[0, 1, 2])
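# Editor's sketch (not part of the original suite): typical ``fix()`` usage,
# restating the behaviour exercised above. The message text varies with the
# wcslib version (see test_fix2/test_fix3), so only the structure is checked.
def test_fix_usage_sketch():
    w = _wcs.Wcsprm()
    w.dateobs = '31/12/99'
    messages = w.fix()
    # Every fixer reports a status string; datfix actually changed the date.
    assert all(isinstance(msg, str) for msg in messages.values())
    assert messages['datfix'] != 'No change'
    assert w.dateobs == '1999-12-31'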
def test_get_ps():
# TODO: We need some data with PSi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_ps()) == 0
def test_get_pv():
# TODO: We need some data with PVi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_pv()) == 0
@raises(AssertionError)
def test_imgpix_matrix():
w = _wcs.Wcsprm()
w.imgpix_matrix
@raises(AttributeError)
def test_imgpix_matrix2():
w = _wcs.Wcsprm()
w.imgpix_matrix = None
def test_isunity():
w = _wcs.Wcsprm()
    assert w.is_unity()
def test_lat():
w = _wcs.Wcsprm()
assert w.lat == -1
@raises(AttributeError)
def test_lat_set():
w = _wcs.Wcsprm()
w.lat = 0
def test_latpole():
w = _wcs.Wcsprm()
assert w.latpole == 90.0
w.latpole = 45.0
assert w.latpole == 45.0
del w.latpole
assert w.latpole == 90.0
def test_lattyp():
w = _wcs.Wcsprm()
print(repr(w.lattyp))
assert w.lattyp == " "
@raises(AttributeError)
def test_lattyp_set():
w = _wcs.Wcsprm()
w.lattyp = 0
def test_lng():
w = _wcs.Wcsprm()
assert w.lng == -1
@raises(AttributeError)
def test_lng_set():
w = _wcs.Wcsprm()
w.lng = 0
def test_lngtyp():
w = _wcs.Wcsprm()
assert w.lngtyp == " "
@raises(AttributeError)
def test_lngtyp_set():
w = _wcs.Wcsprm()
w.lngtyp = 0
def test_lonpole():
w = _wcs.Wcsprm()
assert np.isnan(w.lonpole)
w.lonpole = 45.0
assert w.lonpole == 45.0
del w.lonpole
assert np.isnan(w.lonpole)
def test_mix():
w = _wcs.Wcsprm()
w.ctype = [b'RA---TAN', 'DEC--TAN']
with pytest.raises(_wcs.InvalidCoordinateError):
w.mix(1, 1, [240, 480], 1, 5, [0, 2], [54, 32], 1)
def test_mjdavg():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdavg)
w.mjdavg = 45.0
assert w.mjdavg == 45.0
del w.mjdavg
assert np.isnan(w.mjdavg)
def test_mjdobs():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdobs)
w.mjdobs = 45.0
assert w.mjdobs == 45.0
del w.mjdobs
assert np.isnan(w.mjdobs)
def test_name():
w = _wcs.Wcsprm()
assert w.name == ''
w.name = 'foo'
assert w.name == 'foo'
def test_naxis():
w = _wcs.Wcsprm()
assert w.naxis == 2
@raises(AttributeError)
def test_naxis_set():
w = _wcs.Wcsprm()
w.naxis = 4
def test_obsgeo():
w = _wcs.Wcsprm()
assert np.all(np.isnan(w.obsgeo))
w.obsgeo = [1, 2, 3, 4, 5, 6]
assert_array_equal(w.obsgeo, [1, 2, 3, 4, 5, 6])
del w.obsgeo
assert np.all(np.isnan(w.obsgeo))
def test_pc():
w = _wcs.Wcsprm()
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
del w.cd
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.pc = w.pc
@raises(AttributeError)
def test_pc_missing():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
w.pc
def test_phi0():
w = _wcs.Wcsprm()
assert np.isnan(w.phi0)
w.phi0 = 42.0
assert w.phi0 == 42.0
del w.phi0
assert np.isnan(w.phi0)
@raises(AssertionError)
def test_piximg_matrix():
w = _wcs.Wcsprm()
w.piximg_matrix
@raises(AttributeError)
def test_piximg_matrix2():
w = _wcs.Wcsprm()
w.piximg_matrix = None
def test_print_contents():
# In general, this is human-consumable, so we don't care if the
# content changes, just check the type
w = _wcs.Wcsprm()
assert isinstance(str(w), str)
def test_radesys():
w = _wcs.Wcsprm()
assert w.radesys == ''
w.radesys = 'foo'
assert w.radesys == 'foo'
def test_restfrq():
w = _wcs.Wcsprm()
assert w.restfrq == 0.0
w.restfrq = np.nan
assert np.isnan(w.restfrq)
del w.restfrq
def test_restwav():
w = _wcs.Wcsprm()
assert w.restwav == 0.0
w.restwav = np.nan
assert np.isnan(w.restwav)
del w.restwav
def test_set_ps():
w = _wcs.Wcsprm()
data = [(0, 0, "param1"), (1, 1, "param2")]
w.set_ps(data)
assert w.get_ps() == data
def test_set_ps_realloc():
w = _wcs.Wcsprm()
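    # 16 records is presumably more than the initial internal allocation,
    # so this exercises the realloc path implied by the test name.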
w.set_ps([(0, 0, "param1")] * 16)
def test_set_pv():
w = _wcs.Wcsprm()
data = [(0, 0, 42.), (1, 1, 54.)]
w.set_pv(data)
assert w.get_pv() == data
def test_set_pv_realloc():
w = _wcs.Wcsprm()
w.set_pv([(0, 0, 42.)] * 16)
def test_spcfix():
# TODO: We need some data with broken spectral headers here to
# really test
header = get_pkg_data_contents(
'data/spectra/orion-velo-1.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
assert w.spcfix() == -1
def test_spec():
w = _wcs.Wcsprm()
assert w.spec == -1
@raises(AttributeError)
def test_spec_set():
w = _wcs.Wcsprm()
w.spec = 0
def test_specsys():
w = _wcs.Wcsprm()
assert w.specsys == ''
w.specsys = 'foo'
assert w.specsys == 'foo'
def test_sptr():
# TODO: Write me
pass
def test_ssysobs():
w = _wcs.Wcsprm()
assert w.ssysobs == ''
w.ssysobs = 'foo'
assert w.ssysobs == 'foo'
def test_ssyssrc():
w = _wcs.Wcsprm()
assert w.ssyssrc == ''
w.ssyssrc = 'foo'
assert w.ssyssrc == 'foo'
def test_tab():
w = _wcs.Wcsprm()
assert len(w.tab) == 0
# TODO: Inject some headers that have tables and test
def test_theta0():
w = _wcs.Wcsprm()
assert np.isnan(w.theta0)
w.theta0 = 42.0
assert w.theta0 == 42.0
del w.theta0
assert np.isnan(w.theta0)
def test_toheader():
w = _wcs.Wcsprm()
assert isinstance(w.to_header(), str)
def test_velangl():
w = _wcs.Wcsprm()
assert np.isnan(w.velangl)
w.velangl = 42.0
assert w.velangl == 42.0
del w.velangl
assert np.isnan(w.velangl)
def test_velosys():
w = _wcs.Wcsprm()
assert np.isnan(w.velosys)
w.velosys = 42.0
assert w.velosys == 42.0
del w.velosys
assert np.isnan(w.velosys)
def test_velref():
w = _wcs.Wcsprm()
assert w.velref == 0.0
w.velref = 42.0
assert w.velref == 42.0
del w.velref
assert w.velref == 0.0
def test_zsource():
w = _wcs.Wcsprm()
assert np.isnan(w.zsource)
w.zsource = 42.0
assert w.zsource == 42.0
del w.zsource
assert np.isnan(w.zsource)
def test_cd_3d():
header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
assert w.cd.shape == (3, 3)
assert w.get_pc().shape == (3, 3)
assert w.get_cdelt().shape == (3,)
def test_get_pc():
header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
pc = w.get_pc()
try:
pc[0, 0] = 42
except (RuntimeError, ValueError):
pass
else:
raise AssertionError()
@raises(_wcs.SingularMatrixError)
def test_detailed_err():
w = _wcs.Wcsprm()
w.pc = [[0, 0], [0, 0]]
w.set()
def test_header_parse():
from astropy.io import fits
with get_pkg_data_fileobj(
'data/header_newlines.fits', encoding='binary') as test_file:
hdulist = fits.open(test_file)
w = wcs.WCS(hdulist[0].header)
assert w.wcs.ctype[0] == 'RA---TAN-SIP'
def test_locale():
orig_locale = locale.getlocale(locale.LC_NUMERIC)[0]
try:
locale.setlocale(locale.LC_NUMERIC, 'fr_FR')
except locale.Error:
pytest.xfail(
"Can't set to 'fr_FR' locale, perhaps because it is not installed "
"on this system")
try:
header = get_pkg_data_contents('data/locale.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
assert re.search("[0-9]+,[0-9]*", w.to_header()) is None
finally:
if orig_locale is None:
# reset to the default setting
locale.resetlocale(locale.LC_NUMERIC)
else:
# restore to whatever the previous value had been set to for
# whatever reason
locale.setlocale(locale.LC_NUMERIC, orig_locale)
@raises(UnicodeEncodeError)
def test_unicode():
w = _wcs.Wcsprm()
w.alt = "‰"
def test_sub_segfault():
# Issue #1960
header = fits.Header.fromtextfile(
get_pkg_data_filename('data/sub-segfault.hdr'))
w = wcs.WCS(header)
sub = w.sub([wcs.WCSSUB_CELESTIAL])
gc.collect()
def test_bounds_check():
w = _wcs.Wcsprm()
w.bounds_check(False)
def test_wcs_sub_error_message():
# Issue #1587
w = _wcs.Wcsprm()
with pytest.raises(TypeError) as e:
w.sub('latitude')
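    # The expected text below, odd grammar included, is assumed to be the
    # extension's verbatim error message for this case.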
assert str(e).endswith("axes must None, a sequence or an integer")
def test_wcs_sub():
# Issue #3356
w = _wcs.Wcsprm()
w.sub(['latitude'])
w = _wcs.Wcsprm()
w.sub([b'latitude'])
def test_compare():
header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = _wcs.Wcsprm(header)
w2 = _wcs.Wcsprm(header)
assert w == w2
w.equinox = 42
assert w == w2
assert not w.compare(w2)
assert w.compare(w2, _wcs.WCSCOMPARE_ANCILLARY)
w = _wcs.Wcsprm(header)
w2 = _wcs.Wcsprm(header)
w.cdelt[0] = np.float32(0.00416666666666666666666666)
w2.cdelt[0] = np.float64(0.00416666666666666666666666)
assert not w.compare(w2)
assert w.compare(w2, tolerance=1e-6)
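# Editor's sketch (assumes two default-constructed Wcsprm objects compare
# equal): ``tolerance`` turns the exact comparison into an approximate one.
def test_compare_tolerance_sketch():
    w = _wcs.Wcsprm()
    w2 = _wcs.Wcsprm()
    assert w.compare(w2)
    w.crval[0] = 1e-10
    assert not w.compare(w2)
    # A loose numerical tolerance absorbs the tiny difference.
    assert w.compare(w2, tolerance=1e-6)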
def test_radesys_defaults():
w = _wcs.Wcsprm()
w.ctype = ['RA---TAN', 'DEC--TAN']
w.set()
assert w.radesys == "ICRS"
def test_radesys_defaults_full():
# As described in Section 3.1 of the FITS standard "Equatorial and ecliptic
# coordinates", for those systems the RADESYS keyword can be used to
# indicate the equatorial/ecliptic frame to use. From the standard:
# "For RADESYSa values of FK4 and FK4-NO-E, any stated equinox is Besselian
# and, if neither EQUINOXa nor EPOCH are given, a default of 1950.0 is to
# be taken. For FK5, any stated equinox is Julian and, if neither keyword
# is given, it defaults to 2000.0.
# "If the EQUINOXa keyword is given it should always be accompanied by
# RADESYS a. However, if it should happen to ap- pear by itself then
# RADESYSa defaults to FK4 if EQUINOXa < 1984.0, or to FK5 if EQUINOXa
# 1984.0. Note that these defaults, while probably true of older files
# using the EPOCH keyword, are not required of them.
# By default RADESYS is empty
w = _wcs.Wcsprm(naxis=2)
assert w.radesys == ''
assert np.isnan(w.equinox)
# For non-ecliptic or equatorial systems it is still empty
w = _wcs.Wcsprm(naxis=2)
for ctype in [('GLON-CAR', 'GLAT-CAR'),
('SLON-SIN', 'SLAT-SIN')]:
w.ctype = ctype
w.set()
assert w.radesys == ''
assert np.isnan(w.equinox)
for ctype in [('RA---TAN', 'DEC--TAN'),
('ELON-TAN', 'ELAT-TAN'),
('DEC--TAN', 'RA---TAN'),
('ELAT-TAN', 'ELON-TAN')]:
# Check defaults for RADESYS
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.set()
assert w.radesys == 'ICRS'
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.equinox = 1980
w.set()
assert w.radesys == 'FK4'
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.equinox = 1984
w.set()
assert w.radesys == 'FK5'
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'foo'
w.set()
assert w.radesys == 'foo'
# Check defaults for EQUINOX
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.set()
assert np.isnan(w.equinox) # frame is ICRS, no equinox
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'ICRS'
w.set()
assert np.isnan(w.equinox)
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'FK5'
w.set()
assert w.equinox == 2000.
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'FK4'
w.set()
assert w.equinox == 1950
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = 'FK4-NO-E'
w.set()
assert w.equinox == 1950
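# Editor's sketch: a compact restatement of the EQUINOX defaulting rule
# quoted above, probing just below and at the 1984.0 boundary.
def test_radesys_equinox_boundary_sketch():
    w = _wcs.Wcsprm(naxis=2)
    w.ctype = ['RA---TAN', 'DEC--TAN']
    w.equinox = 1983.9
    w.set()
    assert w.radesys == 'FK4'
    w = _wcs.Wcsprm(naxis=2)
    w.ctype = ['RA---TAN', 'DEC--TAN']
    w.equinox = 1984.0
    w.set()
    assert w.radesys == 'FK5'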
def test_iteration():
world = np.array(
[[-0.58995335, -0.5],
[0.00664326, -0.5],
[-0.58995335, -0.25],
[0.00664326, -0.25],
[-0.58995335, 0.],
[0.00664326, 0.],
[-0.58995335, 0.25],
[0.00664326, 0.25],
[-0.58995335, 0.5],
[0.00664326, 0.5]],
float
)
w = wcs.WCS()
w.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
w.wcs.cdelt = [-0.006666666828, 0.006666666828]
w.wcs.crpix = [75.907, 74.8485]
x = w.wcs_world2pix(world, 1)
expected = np.array(
[[1.64400000e+02, -1.51498185e-01],
[7.49105110e+01, -1.51498185e-01],
[1.64400000e+02, 3.73485009e+01],
[7.49105110e+01, 3.73485009e+01],
[1.64400000e+02, 7.48485000e+01],
[7.49105110e+01, 7.48485000e+01],
[1.64400000e+02, 1.12348499e+02],
[7.49105110e+01, 1.12348499e+02],
[1.64400000e+02, 1.49848498e+02],
[7.49105110e+01, 1.49848498e+02]],
float)
assert_array_almost_equal(x, expected)
w2 = w.wcs_pix2world(x, 1)
world[:, 0] %= 360.
assert_array_almost_equal(w2, world)
def test_invalid_args():
with pytest.raises(TypeError):
w = _wcs.Wcsprm(keysel='A')
with pytest.raises(ValueError):
w = _wcs.Wcsprm(keysel=2)
with pytest.raises(ValueError):
w = _wcs.Wcsprm(colsel=2)
with pytest.raises(ValueError):
w = _wcs.Wcsprm(naxis=64)
header = get_pkg_data_contents(
'data/spectra/orion-velo-1.hdr', encoding='binary')
with pytest.raises(ValueError):
w = _wcs.Wcsprm(header, relax='FOO')
with pytest.raises(ValueError):
w = _wcs.Wcsprm(header, naxis=3)
with pytest.raises(KeyError):
w = _wcs.Wcsprm(header, key='A')
# Test keywords in the Time standard
def test_datebeg():
w = _wcs.Wcsprm()
assert w.datebeg == ''
w.datebeg = '2001-02-11'
assert w.datebeg == '2001-02-11'
w.datebeg = '31/12/99'
fix_ref = {
'cdfix': 'No change',
'cylfix': 'No change',
'obsfix': 'No change',
'datfix': "Invalid DATE-BEG format '31/12/99'",
'spcfix': 'No change',
'unitfix': 'No change',
'celfix': 'No change'}
assert w.fix() == fix_ref
char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',
'dateref', 'dateavg', 'dateend']
@pytest.mark.parametrize('key', char_keys)
def test_char_keys(key):
w = _wcs.Wcsprm()
assert getattr(w, key) == ''
setattr(w, key, "foo")
assert getattr(w, key) == 'foo'
with pytest.raises(TypeError):
setattr(w, key, 42)
num_keys = ['mjdobs', 'mjdbeg', 'mjdend', 'jepoch',
            'bepoch', 'tstart', 'tstop', 'xposure', 'timsyer',
            'timrder', 'timedel', 'timepixr', 'timeoffs',
            'telapse']
@pytest.mark.parametrize('key', num_keys)
def test_num_keys(key):
w = _wcs.Wcsprm()
assert np.isnan(getattr(w, key))
setattr(w, key, 42.0)
assert getattr(w, key) == 42.0
delattr(w, key)
assert np.isnan(getattr(w, key))
with pytest.raises(TypeError):
setattr(w, key, "foo")
array_keys = ['czphs', 'cperi', 'mjdref']
@pytest.mark.parametrize('key', array_keys)
def test_array_keys(key):
w = _wcs.Wcsprm()
attr = getattr(w, key)
assert np.all(np.isnan(attr))
assert attr.dtype == float
setattr(w, key, [1., 2.])
assert_array_equal(getattr(w, key), [1., 2.])
with pytest.raises(ValueError):
setattr(w, key, ["foo", "bar"])
with pytest.raises(ValueError):
setattr(w, key, "foo")
|
95aa4067d5ff45d8ea1a0212aa5cfc633be0d6725efd74b8d229662178f2b986 | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord
from astropy.io.fits import Header
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcs import WCS
from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
wcs = WCS_EMPTY
# Low-level API
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [None]
assert wcs.world_axis_units == ['']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('world', 0, 'value')]
assert wcs.world_axis_object_classes['world'][0] is Quantity
assert wcs.world_axis_object_classes['world'][1] == ()
assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one
assert_allclose(wcs.pixel_to_world_values(29), 29)
assert_allclose(wcs.array_index_to_world_values(29), 29)
assert np.ndim(wcs.pixel_to_world_values(29)) == 0
assert np.ndim(wcs.array_index_to_world_values(29)) == 0
assert_allclose(wcs.world_to_pixel_values(29), 29)
assert_equal(wcs.world_to_array_index_values(29), (29,))
assert np.ndim(wcs.world_to_pixel_values(29)) == 0
assert np.ndim(wcs.world_to_array_index_values(29)) == 0
# High-level API
coord = wcs.pixel_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = wcs.array_index_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = 15 * u.one
x = wcs.world_to_pixel(coord)
assert_allclose(x, 15.)
assert np.ndim(x) == 0
i = wcs.world_to_array_index(coord)
assert_equal(i, 15)
assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""
WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(HEADER_SIMPLE_CELESTIAL, sep='\n'))
def test_simple_celestial():
wcs = WCS_SIMPLE_CELESTIAL
# Low-level API
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
assert wcs.world_axis_units == ['deg', 'deg']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))
assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
# High-level API
coord = wcs.pixel_to_world(29, 39)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert coord.ra.deg == 10
assert coord.dec.deg == 20
coord = wcs.array_index_to_world(39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert coord.ra.deg == 10
assert coord.dec.deg == 20
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
x, y = wcs.world_to_pixel(coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that if the coordinates are passed in a different frame things still
# work properly
coord_galactic = coord.galactic
x, y = wcs.world_to_pixel(coord_galactic)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord_galactic)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that we can actually index the array
data = np.arange(3600).reshape((60, 60))
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], 2369)
coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
def test_spectral_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_SPECTRAL_CUBE
# Low-level API
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
# High-level API
coord, spec = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert coord.l.deg == 25
assert coord.b.deg == 10
assert isinstance(spec, Quantity)
assert spec.to_value(u.Hz) == 20
coord, spec = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert coord.l.deg == 25
assert coord.b.deg == 10
assert isinstance(spec, Quantity)
assert spec.to_value(u.Hz) == 20
coord = SkyCoord(25, 10, unit='deg', frame='galactic')
spec = 20 * u.Hz
x, y, z = wcs.world_to_pixel(coord, spec)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
x, y, z = wcs.world_to_pixel(spec, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
i, j, k = wcs.world_to_array_index(coord, spec)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
i, j, k = wcs.world_to_array_index(spec, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """
PC2_3 = -0.5
PC3_2 = +0.5
"""
WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n'))
def test_spectral_cube_nonaligned():
# Make sure that correlation matrix gets adjusted if there are non-identity
# CD matrix terms.
wcs = WCS_SPECTRAL_CUBE_NONALIGNED
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, True, True], [False, True, True], [True, True, True]])
# NOTE: we check world_axis_object_components and world_axis_object_classes
# again here because in the past this failed when non-aligned axes were
# present, so this serves as a regression test.
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitude (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n'))
def test_time_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_TIME_CUBE
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (11, 2048, 2048)
assert wcs.pixel_shape == (2048, 2048, 11)
assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']
assert wcs.world_axis_units == ['deg', 'deg', 's']
assert_equal(wcs.axis_correlation_matrix, [[True, True, False], [True, True, False], [False, False, True]])
with pytest.warns(FutureWarning):
assert wcs.world_axis_object_components == [
('celestial', 1, 'spherical.lat.degree'),
('celestial', 0, 'spherical.lon.degree'),
('utc', 0, 'value')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['utc'][0] is Quantity
assert wcs.world_axis_object_classes['utc'][1] == ()
assert wcs.world_axis_object_classes['utc'][2] == {'unit': 's'}
assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
(-449.2, 2955.6, 0))
assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
(0, 2956, -449))
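    # Note: the array indices above are the pixel coordinates rounded to the
    # nearest integer and reversed into numpy (slow-to-fast) axis order.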
# High-level API
# Make sure that we get a FutureWarning about the time column
with warnings.catch_warnings(record=True) as warning_entries:
warnings.resetwarnings()
coord, time = wcs.pixel_to_world(29, 39, 44)
# Check that there's at least one warning of the right category/message
assert len(warning_entries) > 0
found_warning = False
for w in warning_entries:
msg = 'In future, times will be represented by the Time class'
if w.category is FutureWarning and str(w.message).startswith(msg):
found_warning = True
assert found_warning
assert isinstance(coord, SkyCoord)
assert isinstance(time, Quantity)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
# TODO: Determine whether the following behavior is desirable
wcs = WCS(naxis=1)
wcs.wcs.cunit = ['bananas // sekonds']
assert wcs.world_axis_units == ['bananas // sekonds']
def test_distortion_correlations():
filename = get_pkg_data_filename('../../tests/data/sip.fits')
w = WCS(filename)
assert_equal(w.axis_correlation_matrix, True)
# Changing PC to an identity matrix doesn't change anything since
# distortions are still present.
w.wcs.pc = [[1, 0], [0, 1]]
assert_equal(w.axis_correlation_matrix, True)
# Nor does changing the name of the axes to make them non-celestial
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
# However once we turn off the distortions the matrix changes
w.sip = None
assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
# If we go back to celestial coordinates then the matrix is all True again
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_equal(w.axis_correlation_matrix, True)
# Or if we change to X/Y but have a non-identity PC
w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['SPAM']
assert wcs.world_axis_physical_types == [None]
# Check simple behavior
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == [None]
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check priority in nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
assert wcs.world_axis_physical_types == ['notfood']
def test_caching_components_and_classes():
# Make sure that when we change the WCS object, the classes and components
# are updated (we use a cache internally, so we need to make sure the cache
# is invalidated if needed)
wcs = WCS_SIMPLE_CELESTIAL
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
wcs.wcs.radesys = 'FK5'
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2000.
wcs.wcs.equinox = 2010
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2010.
|
1d794576a8ce5a076386bc2ac14a630fd238ce9bad729b1f2245342d547cb198 | import numpy as np
from numpy.testing import assert_allclose
from astropy.units import Quantity
from astropy.coordinates import SkyCoord
from astropy.wcs.wcsapi.low_level_api import BaseLowLevelWCS
from astropy.wcs.wcsapi.high_level_api import HighLevelWCSMixin
class DoubleLowLevelWCS(BaseLowLevelWCS):
"""
Basic dummy transformation that doubles values.
"""
def pixel_to_world_values(self, *pixel_arrays):
return [np.asarray(pix) * 2 for pix in pixel_arrays]
def array_index_to_world_values(self, *index_arrays):
return [np.asarray(pix) * 2 for pix in index_arrays]
def world_to_pixel_values(self, *world_arrays):
return [np.asarray(world) / 2 for world in world_arrays]
def world_to_array_index_values(self, *world_arrays):
return [np.asarray(world) / 2 for world in world_arrays]
class SimpleDuplicateWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
    This example WCS has two world coordinates that use the same class,
which triggers a different path in the high level WCS code.
"""
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ['pos.eq.ra', 'pos.eq.dec']
@property
def world_axis_units(self):
return ['deg', 'deg']
@property
def world_axis_object_components(self):
return [('test1', 0, 'value'),
('test2', 0, 'value')]
@property
def world_axis_object_classes(self):
return {'test1': (Quantity, (), {'unit': 'deg'}),
'test2': (Quantity, (), {'unit': 'deg'})}
def test_simple_duplicate():
# Make sure that things work properly when the low-level WCS uses the same
# class for two of the coordinates.
wcs = SimpleDuplicateWCS()
q1, q2 = wcs.pixel_to_world(1, 2)
assert isinstance(q1, Quantity)
assert isinstance(q2, Quantity)
x, y = wcs.world_to_pixel(q1, q2)
assert_allclose(x, 1)
assert_allclose(y, 2)
class SkyCoordDuplicateWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
    This example WCS returns two SkyCoord objects, which triggers a
different path in the high level WCS code.
"""
@property
def pixel_n_dim(self):
return 4
@property
def world_n_dim(self):
return 4
@property
def world_axis_physical_types(self):
return ['pos.eq.ra', 'pos.eq.dec', 'pos.galactic.lon', 'pos.galactic.lat']
@property
def world_axis_units(self):
return ['deg', 'deg', 'deg', 'deg']
@property
def world_axis_object_components(self):
# Deliberately use 'ra'/'dec' here to make sure that string argument
# names work properly.
return [('test1', 'ra', 'spherical.lon.degree'),
('test1', 'dec', 'spherical.lat.degree'),
('test2', 0, 'spherical.lon.degree'),
('test2', 1, 'spherical.lat.degree')]
@property
def world_axis_object_classes(self):
return {'test1': (SkyCoord, (), {'unit': 'deg'}),
'test2': (SkyCoord, (), {'unit': 'deg', 'frame': 'galactic'})}
def test_skycoord_duplicate():
# Make sure that things work properly when the low-level WCS uses the same
# class, and specifically a SkyCoord for two of the coordinates.
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
assert isinstance(c1, SkyCoord)
assert isinstance(c2, SkyCoord)
x, y, z, a = wcs.world_to_pixel(c1, c2)
assert_allclose(x, 1)
assert_allclose(y, 2)
assert_allclose(z, 3)
assert_allclose(a, 4)
class SerializedWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
WCS with serialized classes
"""
@property
def serialized_classes(self):
return True
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ['pos.eq.ra', 'pos.eq.dec']
@property
def world_axis_units(self):
return ['deg', 'deg']
@property
def world_axis_object_components(self):
return [('test', 0, 'value')]
@property
def world_axis_object_classes(self):
return {'test': ('astropy.units.Quantity', (),
{'unit': ('astropy.units.Unit', ('deg',), {})})}
def test_serialized_classes():
wcs = SerializedWCS()
q = wcs.pixel_to_world(1)
assert isinstance(q, Quantity)
x = wcs.world_to_pixel(q)
assert_allclose(x, 1)
|
685d9ac4bdb74b220bc16171a5159161f45e4714fdc7771acb5fe6bb9bea51fd | import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy.wcs import WCS
from astropy.io.fits import Header
from astropy.coordinates import SkyCoord, Galactic
from astropy.units import Quantity
from astropy.wcs.wcsapi.sliced_low_level_wcs import SlicedLowLevelWCS, sanitize_slices
import astropy.units as u
# To test the slicing we start off from standard FITS WCS
# objects since those implement the low-level API. We create
# a WCS for a spectral cube with axes in non-standard order
# and with correlated celestial axes and an uncorrelated
# spectral axis.
HEADER_SPECTRAL_CUBE = """
NAXIS = 3
NAXIS1 = 10
NAXIS2 = 20
NAXIS3 = 30
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
WCS_SPECTRAL_CUBE.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
@pytest.mark.parametrize("item, ndim, expected", (
([Ellipsis, 10], 4, [slice(None)] * 3 + [10]),
([10, slice(20, 30)], 5, [10, slice(20, 30)] + [slice(None)] * 3),
([10, Ellipsis, 8], 10, [10] + [slice(None)] * 8 + [8])
))
def test_sanitize_slice(item, ndim, expected):
new_item = sanitize_slices(item, ndim)
# FIXME: do we still need the first two since the third assert
# should cover it all?
assert len(new_item) == ndim
assert all(isinstance(i, (slice, int)) for i in new_item)
assert new_item == expected
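# Editor's sketch mirroring the parametrized cases above: an Ellipsis that
# only needs to fill the leading axes.
def test_sanitize_slice_ellipsis_sketch():
    assert sanitize_slices([Ellipsis, 5], 3) == [slice(None), slice(None), 5]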
EXPECTED_ELLIPSIS_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_ELLIPSIS_REPR.strip()
EXPECTED_SPECTRAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 2 world dimensions
Array shape (Numpy order): (30, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 yes yes
1 yes yes
"""
def test_spectral_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), 10])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape == (30, 10)
assert wcs.pixel_shape == (10, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, True], [True, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 44), (10, 25))
assert_allclose(wcs.array_index_to_world_values(44, 29), (10, 25))
assert_allclose(wcs.world_to_pixel_values(10, 25), (29., 44.))
assert_equal(wcs.world_to_array_index_values(10, 25), (44, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_SPECTRAL_SLICE_REPR.strip()
EXPECTED_SPECTRAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 6, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 6 (-6, 14)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_spectral_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), slice(4, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 6, 10)
assert wcs.pixel_shape == (10, 6, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 35, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 35, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 35., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 35, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-6, 14), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_SPECTRAL_RANGE_REPR.strip()
EXPECTED_CELESTIAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20)
Pixel Dim Data size Bounds
0 20 (-2, 18)
1 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 no yes
1 yes no
2 no yes
"""
def test_celestial_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, 5])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20)
assert wcs.pixel_shape == (20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[False, True], [True, False], [False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(39, 44), (12.4, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39), (12.4, 20, 25))
assert_allclose(wcs.world_to_pixel_values(12.4, 20, 25), (39., 44.))
assert_equal(wcs.world_to_array_index_values(12.4, 20, 25), (44, 39))
assert_equal(wcs.pixel_bounds, [(-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_CELESTIAL_SLICE_REPR.strip()
EXPECTED_CELESTIAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Data size Bounds
0 5 (-6, 6)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(24, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 24), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (24., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 24))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_CELESTIAL_RANGE_REPR.strip()
# Now try with a 90 degree rotation
WCS_SPECTRAL_CUBE_ROT = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
WCS_SPECTRAL_CUBE_ROT.wcs.pc = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
WCS_SPECTRAL_CUBE_ROT.wcs.crval[0] = 0
WCS_SPECTRAL_CUBE_ROT.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
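# Note: the PC matrix above is a permutation swapping the first and third
# intermediate axes, so GLAT-CAR and GLON-CAR trade places relative to the
# pixel grid; CRVAL1 is zeroed, presumably to keep the swapped latitude
# reference value valid.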
EXPECTED_CELESTIAL_RANGE_ROT_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Data size Bounds
0 5 (-6, 6)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range_rot():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_ROT, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(14, 29, 34), (1, 15, 24))
assert_allclose(wcs.array_index_to_world_values(34, 29, 14), (1, 15, 24))
assert_allclose(wcs.world_to_pixel_values(1, 15, 24), (14., 29., 34.))
assert_equal(wcs.world_to_array_index_values(1, 15, 24), (34, 29, 14))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip()
HEADER_NO_SHAPE_CUBE = """
NAXIS = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
WCS_NO_SHAPE_CUBE = WCS(Header.fromstring(HEADER_NO_SHAPE_CUBE, sep='\n'))
EXPECTED_NO_SHAPE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): None
Pixel Dim Data size Bounds
0 None None
1 None None
2 None None
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_no_array_shape():
wcs = SlicedLowLevelWCS(WCS_NO_SHAPE_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert str(wcs) == repr(wcs) == EXPECTED_NO_SHAPE_REPR.strip()
|
bbfc46b57da1a1ace9d84d4012d6ed9c6a78664198565875d210251b897490e6 | from pytest import raises
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.wcs.wcsapi.utils import deserialize_class
def test_construct():
result = deserialize_class(('astropy.units.Quantity', (10,), {'unit': 'deg'}))
assert_quantity_allclose(result, 10 * u.deg)
def test_noconstruct():
result = deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}), construct=False)
assert result == (u.Quantity, (), {'unit': 'deg'})
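# Editor's sketch combining the two modes above; nested serialized tuples in
# the keyword arguments are assumed to be deserialized recursively.
def test_construct_nested_sketch():
    result = deserialize_class(
        ('astropy.units.Quantity', (10,),
         {'unit': ('astropy.units.Unit', ('deg',), {})}))
    assert_quantity_allclose(result, 10 * u.deg)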
def test_invalid():
with raises(ValueError) as exc:
deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}, ()))
assert exc.value.args[0] == 'Expected a tuple of three values'
|
f00b56c3d1bc2e091152dfbf148168677ddac517d675236f2148d56589375f06 | from pytest import raises
from astropy.wcs.wcsapi.low_level_api import validate_physical_types
def test_validate_physical_types():
# Check valid cases
validate_physical_types(['pos.eq.ra', 'pos.eq.ra'])
validate_physical_types(['spect.dopplerVeloc.radio', 'custom:spam'])
validate_physical_types(['time', None])
# Make sure validation is case sensitive
with raises(ValueError) as exc:
validate_physical_types(['pos.eq.ra', 'Pos.eq.dec'])
assert exc.value.args[0] == 'Invalid physical type: Pos.eq.dec'
# Make sure nonsense types are picked up
with raises(ValueError) as exc:
validate_physical_types(['spam'])
assert exc.value.args[0] == 'Invalid physical type: spam'
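# Editor's sketch of the ``custom:`` escape hatch seen above, which is
# assumed to bypass the UCD1+ vocabulary check entirely.
def test_validate_custom_prefix_sketch():
    validate_physical_types(['custom:anything.goes'])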
|
933dac30c1d1c95d142c237197faf45306d872c02d155f9f0258874939ad06a0 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord
from astropy.wcs.wcsapi.low_level_api import BaseLowLevelWCS
from astropy.wcs.wcsapi.high_level_wcs_wrapper import HighLevelWCSWrapper
class CustomLowLevelWCS(BaseLowLevelWCS):
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ['pos.eq.ra', 'pos.eq.dec']
@property
def world_axis_units(self):
return ['deg', 'deg']
def pixel_to_world_values(self, *pixel_arrays):
return [np.asarray(pix) * 2 for pix in pixel_arrays]
def array_index_to_world_values(self, *index_arrays):
return [np.asarray(pix) * 2 for pix in index_arrays]
def world_to_pixel_values(self, *world_arrays):
return [np.asarray(world) / 2 for world in world_arrays]
def world_to_array_index_values(self, *world_arrays):
return [np.asarray(world) / 2 for world in world_arrays]
@property
def world_axis_object_components(self):
return [('test', 0, 'spherical.lon.degree'),
('test', 1, 'spherical.lat.degree')]
@property
def world_axis_object_classes(self):
return {'test': (SkyCoord, (), {'unit': 'deg'})}
def test_wrapper():
wcs = CustomLowLevelWCS()
wrapper = HighLevelWCSWrapper(wcs)
coord = wrapper.pixel_to_world(1, 2)
assert isinstance(coord, SkyCoord)
assert coord.isscalar
x, y = wrapper.world_to_pixel(coord)
assert_allclose(x, 1)
assert_allclose(y, 2)
assert wrapper.low_level_wcs is wcs
assert wrapper.pixel_n_dim == 2
assert wrapper.world_n_dim == 2
assert wrapper.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
assert wrapper.world_axis_units == ['deg', 'deg']
assert wrapper.array_shape is None
assert wrapper.pixel_bounds is None
assert wrapper.axis_correlation_matrix is None
def test_wrapper_invalid():
class InvalidCustomLowLevelWCS(CustomLowLevelWCS):
@property
def world_axis_object_classes(self):
return {}
wcs = InvalidCustomLowLevelWCS()
wrapper = HighLevelWCSWrapper(wcs)
with pytest.raises(KeyError):
wrapper.pixel_to_world(1, 2)
|
6f273f7db3ef39328cc5968b61f8c7d10054f35f360e0502939bda064247c02d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import numpy as np
import operator
import pytest
from datetime import timedelta
from astropy.time import (Time, TimeDelta, OperandTypeError, ScaleValueError,
TIME_SCALES, STANDARD_TIME_SCALES, TIME_DELTA_SCALES)
from astropy import units as u
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
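# Editor's note: 2**-52 is one ulp of a float64 near 1.0, so on day-scale
# values these helpers tolerate roughly tens of picoseconds, e.g.
#     allclose_sec(86400.0, 86400.0 + 1e-12)  -> True   (~1 ps difference)
#     allclose_sec(86400.0, 86400.0 + 1e-6)   -> False  (a microsecond)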
class TestTimeDelta():
"""Test TimeDelta class"""
def setup(self):
self.t = Time('2010-01-01', scale='utc')
self.t2 = Time('2010-01-02 00:00:01', scale='utc')
self.t3 = Time('2010-01-03 01:02:03', scale='utc', precision=9,
in_subfmt='date_hms', out_subfmt='date_hm',
location=(-75.*u.degree, 30.*u.degree, 500*u.m))
self.t4 = Time('2010-01-01', scale='local')
self.dt = TimeDelta(100.0, format='sec')
self.dt_array = TimeDelta(np.arange(100, 1000, 100), format='sec')
def test_sub(self):
# time - time
dt = self.t2 - self.t
assert (repr(dt).startswith("<TimeDelta object: scale='tai' "
"format='jd' value=1.00001157407"))
assert allclose_jd(dt.jd, 86401.0 / 86400.0)
assert allclose_sec(dt.sec, 86401.0)
# time - delta_time
t = self.t2 - dt
assert t.iso == self.t.iso
# delta_time - delta_time
dt2 = dt - self.dt
assert allclose_sec(dt2.sec, 86301.0)
# delta_time - time
with pytest.raises(OperandTypeError):
dt - self.t
def test_add(self):
# time + time
with pytest.raises(OperandTypeError):
self.t2 + self.t
# time + delta_time
dt = self.t2 - self.t
t2 = self.t + dt
assert t2.iso == self.t2.iso
# delta_time + delta_time
dt2 = dt + self.dt
assert allclose_sec(dt2.sec, 86501.0)
# delta_time + time
dt = self.t2 - self.t
t2 = dt + self.t
assert t2.iso == self.t2.iso
def test_add_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format='mjd', scale='utc')
t2 = Time([0.0, 1.0], format='mjd', scale='utc')
dt = TimeDelta(100.0, format='jd')
dt2 = TimeDelta([100.0, 200.0], format='jd')
out = t + dt
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = t + dt2
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = t2 + dt
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt + dt
assert allclose_jd(out.jd, 200.0)
assert out.isscalar
out = dt + dt2
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
# Reverse the argument order
out = dt + t
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = dt2 + t
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = dt + t2
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt2 + dt
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
def test_sub_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format='mjd', scale='utc')
t2 = Time([0.0, 1.0], format='mjd', scale='utc')
dt = TimeDelta(100.0, format='jd')
dt2 = TimeDelta([100.0, 200.0], format='jd')
out = t - dt
assert allclose_jd(out.mjd, -100.0)
assert out.isscalar
out = t - dt2
assert allclose_jd(out.mjd, [-100.0, -200.0])
assert not out.isscalar
out = t2 - dt
assert allclose_jd(out.mjd, [-100.0, -99.0])
assert not out.isscalar
out = dt - dt
assert allclose_jd(out.jd, 0.0)
assert out.isscalar
out = dt - dt2
assert allclose_jd(out.jd, [0.0, -100.0])
assert not out.isscalar
@pytest.mark.parametrize('values', [(2455197.5, 2455198.5),
([2455197.5], [2455198.5])])
def test_copy_timedelta(self, values):
"""Test copying the values of a TimeDelta object by passing it into the
        TimeDelta initializer.
"""
val1, val2 = values
t = Time(val1, format='jd', scale='utc')
t2 = Time(val2, format='jd', scale='utc')
dt = t2 - t
dt2 = TimeDelta(dt, copy=False)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is dt2._time.jd1
assert dt._time.jd2 is dt2._time.jd2
dt2 = TimeDelta(dt, copy=True)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is not dt2._time.jd1
assert dt._time.jd2 is not dt2._time.jd2
# Include initializers
dt2 = TimeDelta(dt, format='sec')
assert allclose_sec(dt2.value, 86400.0)
def test_neg_abs(self):
for dt in (self.dt, self.dt_array):
dt2 = -dt
assert np.all(dt2.jd == -dt.jd)
dt3 = abs(dt)
assert np.all(dt3.jd == dt.jd)
dt4 = abs(dt2)
assert np.all(dt4.jd == dt.jd)
def test_mul_div(self):
for dt in (self.dt, self.dt_array):
dt2 = dt + dt + dt
dt3 = 3. * dt
assert allclose_jd(dt2.jd, dt3.jd)
dt4 = dt3 / 3.
assert allclose_jd(dt4.jd, dt.jd)
dt5 = self.dt * np.arange(3)
assert dt5[0].jd == 0.
assert dt5[-1].jd == (self.dt + self.dt).jd
dt6 = self.dt * [0, 1, 2]
assert np.all(dt6.jd == dt5.jd)
with pytest.raises(OperandTypeError):
self.dt * self.t
with pytest.raises(TypeError):
self.dt * object()
def test_keep_properties(self):
# closes #1924 (partially)
dt = TimeDelta(1000., format='sec')
for t in (self.t, self.t3):
ta = t + dt
assert ta.location is t.location
assert ta.precision == t.precision
assert ta.in_subfmt == t.in_subfmt
assert ta.out_subfmt == t.out_subfmt
tr = dt + t
assert tr.location is t.location
assert tr.precision == t.precision
assert tr.in_subfmt == t.in_subfmt
assert tr.out_subfmt == t.out_subfmt
ts = t - dt
assert ts.location is t.location
assert ts.precision == t.precision
assert ts.in_subfmt == t.in_subfmt
assert ts.out_subfmt == t.out_subfmt
t_tdb = self.t.tdb
assert hasattr(t_tdb, '_delta_tdb_tt')
assert not hasattr(t_tdb, '_delta_ut1_utc')
t_tdb_ut1 = t_tdb.ut1
assert hasattr(t_tdb_ut1, '_delta_tdb_tt')
assert hasattr(t_tdb_ut1, '_delta_ut1_utc')
t_tdb_ut1_utc = t_tdb_ut1.utc
assert hasattr(t_tdb_ut1_utc, '_delta_tdb_tt')
assert hasattr(t_tdb_ut1_utc, '_delta_ut1_utc')
# adding or subtracting some time should remove the delta's
# since these are time-dependent and should be recalculated
for op in (operator.add, operator.sub):
t1 = op(t_tdb, dt)
assert not hasattr(t1, '_delta_tdb_tt')
assert not hasattr(t1, '_delta_ut1_utc')
t2 = op(t_tdb_ut1, dt)
assert not hasattr(t2, '_delta_tdb_tt')
assert not hasattr(t2, '_delta_ut1_utc')
t3 = op(t_tdb_ut1_utc, dt)
assert not hasattr(t3, '_delta_tdb_tt')
assert not hasattr(t3, '_delta_ut1_utc')
def test_set_format(self):
"""
Test basics of setting format attribute.
"""
dt = TimeDelta(86400.0, format='sec')
assert dt.value == 86400.0
assert dt.format == 'sec'
dt.format = 'jd'
assert dt.value == 1.0
assert dt.format == 'jd'
dt.format = 'datetime'
assert dt.value == timedelta(days=1)
assert dt.format == 'datetime'
class TestTimeDeltaScales():
"""Test scale conversion for Time Delta.
Go through @taldcroft's list of expected behavior from #1932"""
def setup(self):
# pick a date that includes a leap second for better testing
self.iso_times = ['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-07-01 00:00:00', '2012-07-01 12:00:00']
self.t = dict((scale, Time(self.iso_times, scale=scale, precision=9))
for scale in TIME_SCALES)
self.dt = dict((scale, self.t[scale]-self.t[scale][0])
for scale in TIME_SCALES)
def test_delta_scales_definition(self):
for scale in list(TIME_DELTA_SCALES) + [None]:
TimeDelta([0., 1., 10.], format='sec', scale=scale)
with pytest.raises(ScaleValueError):
TimeDelta([0., 1., 10.], format='sec', scale='utc')
@pytest.mark.parametrize(('scale1', 'scale2'),
list(itertools.product(STANDARD_TIME_SCALES,
STANDARD_TIME_SCALES)))
def test_standard_scales_for_time_minus_time(self, scale1, scale2):
"""T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X)
and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X
I.e., time differences of two times should have the scale of the
first time. The one exception is UTC, which returns TAI.
There are no standard timescales for which this does not work.
"""
t1 = self.t[scale1]
t2 = self.t[scale2]
dt = t1 - t2
if scale1 in TIME_DELTA_SCALES:
assert dt.scale == scale1
else:
assert scale1 == 'utc'
assert dt.scale == 'tai'
# now check with delta time; also check reversibility
t1_recover_t2_scale = t2 + dt
assert t1_recover_t2_scale.scale == scale2
t1_recover = getattr(t1_recover_t2_scale, scale1)
assert allclose_jd(t1_recover.jd, t1.jd)
t2_recover_t1_scale = t1 - dt
assert t2_recover_t1_scale.scale == scale1
t2_recover = getattr(t2_recover_t1_scale, scale2)
assert allclose_jd(t2_recover.jd, t2.jd)
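    def test_utc_minus_utc_gives_tai_sketch(self):
        # Concrete illustration (added; not part of the original module) of
        # the rule above: differencing two UTC times yields a TAI-scale delta,
        # since 'utc' is not itself a valid TimeDelta scale.
        dt = self.t['utc'][1] - self.t['utc'][0]
        assert dt.scale == 'tai'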
def test_local_scales_for_time_minus_time(self):
""" T1(local) - T2(local) should return dT(local)
T1(local) +/- dT(local) or T1(local) +/- Quantity(time-like) should
also return T(local)
I.e. Tests that time differences of two local scale times should
return delta time with local timescale. Furthermore, checks that
arithmetic of T(local) with dT(None) or time-like quantity does work.
Also tests that subtracting two Time objects, one having local time
scale and other having standard time scale should raise TypeError.
"""
t1 = self.t['local']
t2 = Time('2010-01-01', scale='local')
dt = t1 - t2
assert dt.scale == 'local'
# now check with delta time
t1_recover = t2 + dt
assert t1_recover.scale == 'local'
assert allclose_jd(t1_recover.jd, t1.jd)
# check that dT(None) can be subtracted from T(local)
dt2 = TimeDelta([10.], format='sec', scale=None)
t3 = t2 - dt2
assert t3.scale == t2.scale
# check that time quantity can be subtracted from T(local)
q = 10 * u.s
assert (t2 - q).value == (t2 - dt2).value
# Check that one cannot subtract/add times with a standard scale
# from a local one (or vice versa)
t1 = self.t['local']
for scale in STANDARD_TIME_SCALES:
t2 = self.t[scale]
with pytest.raises(TypeError):
t1 - t2
with pytest.raises(TypeError):
t2 - t1
with pytest.raises(TypeError):
t2 - dt
with pytest.raises(TypeError):
t2 + dt
with pytest.raises(TypeError):
dt + t2
def test_scales_for_delta_minus_delta(self):
"""dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X
I.e. this will succeed if dT(Y) can be converted to scale X.
Returns delta time in scale X
"""
# geocentric timescales
dt_tai = self.dt['tai']
dt_tt = self.dt['tt']
dt0 = dt_tai - dt_tt
assert dt0.scale == 'tai'
        # tai and tt differ only by a constant offset, so the deltas agree
assert allclose_sec(dt0.sec, 0.)
dt_tcg = self.dt['tcg']
dt1 = dt_tai - dt_tcg
assert dt1.scale == 'tai'
        # tai and tcg run at different rates, so the deltas differ
assert not allclose_sec(dt1.sec, 0.)
t_tai_tcg = self.t['tai'].tcg
dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
dt2 = dt_tai - dt_tai_tcg
assert dt2.scale == 'tai'
# but if tcg difference calculated from tai, it should roundtrip
assert allclose_sec(dt2.sec, 0.)
# check that if we put TCG first, we get a TCG scale back
dt3 = dt_tai_tcg - dt_tai
assert dt3.scale == 'tcg'
assert allclose_sec(dt3.sec, 0.)
for scale in 'tdb', 'tcb', 'ut1':
with pytest.raises(TypeError):
dt_tai - self.dt[scale]
# barycentric timescales
dt_tcb = self.dt['tcb']
dt_tdb = self.dt['tdb']
dt4 = dt_tcb - dt_tdb
assert dt4.scale == 'tcb'
        assert not allclose_sec(dt4.sec, 0.)  # tcb and tdb run at different rates
t_tcb_tdb = self.t['tcb'].tdb
dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
dt5 = dt_tcb - dt_tcb_tdb
assert dt5.scale == 'tcb'
assert allclose_sec(dt5.sec, 0.)
for scale in 'utc', 'tai', 'tt', 'tcg', 'ut1':
with pytest.raises(TypeError):
dt_tcb - self.dt[scale]
# rotational timescale
dt_ut1 = self.dt['ut1']
dt5 = dt_ut1 - dt_ut1[-1]
assert dt5.scale == 'ut1'
assert dt5[-1].sec == 0.
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb':
with pytest.raises(TypeError):
dt_ut1 - self.dt[scale]
# local time scale
dt_local = self.dt['local']
dt6 = dt_local - dt_local[-1]
assert dt6.scale == 'local'
assert dt6[-1].sec == 0.
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb', 'ut1':
with pytest.raises(TypeError):
dt_local - self.dt[scale]
@pytest.mark.parametrize(
('scale', 'op'), list(itertools.product(TIME_SCALES,
(operator.add, operator.sub))))
def test_scales_for_delta_scale_is_none(self, scale, op):
"""T(X) +/- dT(None) or T(X) +/- Quantity(time-like)
This is always allowed and just adds JDs, i.e., the scale of
the TimeDelta or time-like Quantity will be taken to be X.
The one exception is again for X=UTC, where TAI is assumed instead,
so that a day is always defined as 86400 seconds.
"""
dt_none = TimeDelta([0., 1., -1., 1000.], format='sec')
assert dt_none.scale is None
q_time = dt_none.to('s')
dt = self.dt[scale]
dt1 = op(dt, dt_none)
assert dt1.scale == dt.scale
assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
dt2 = op(dt_none, dt)
assert dt2.scale == dt.scale
assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
dt3 = op(q_time, dt)
assert dt3.scale == dt.scale
assert allclose_jd(dt3.jd, dt2.jd)
t = self.t[scale]
t1 = op(t, dt_none)
assert t1.scale == t.scale
assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
if op is operator.add:
t2 = op(dt_none, t)
assert t2.scale == t.scale
assert allclose_jd(t2.jd, t1.jd)
t3 = op(t, q_time)
assert t3.scale == t.scale
assert allclose_jd(t3.jd, t1.jd)
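    def test_quantity_matches_scale_free_delta_sketch(self):
        # Minimal sketch (added; not part of the original module): a time-like
        # Quantity behaves exactly like a scale-free TimeDelta in arithmetic.
        t = self.t['tai'][0]
        assert allclose_jd((t + 3600 * u.s).jd,
                           (t + TimeDelta(3600., format='sec')).jd)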
@pytest.mark.parametrize('scale', TIME_SCALES)
def test_delta_day_is_86400_seconds(self, scale):
"""TimeDelta or Quantity holding 1 day always means 24*60*60 seconds
This holds true for all timescales but UTC, for which leap-second
days are longer or shorter by one second.
"""
t = self.t[scale]
dt_day = TimeDelta(1., format='jd')
q_day = dt_day.to('day')
dt_day_leap = t[-1] - t[0]
# ^ = exclusive or, so either equal and not UTC, or not equal and UTC
assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == 'utc')
t1 = t[0] + dt_day
assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == 'utc')
t2 = q_day + t[0]
assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == 'utc')
t3 = t[-1] - dt_day
assert allclose_jd(t3.jd, t[0].jd) ^ (scale == 'utc')
t4 = t[-1] - q_day
assert allclose_jd(t4.jd, t[0].jd) ^ (scale == 'utc')
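def test_leap_second_day_sketch():
    # Illustrative sketch (added; not part of the original module): across the
    # 2012-06-30 leap second the UTC day spans 86401 SI seconds, so adding a
    # 1-day TimeDelta to a UTC Time does not reproduce the same wall clock.
    t0 = Time('2012-06-30 12:00:00', scale='utc')
    t1 = Time('2012-07-01 12:00:00', scale='utc')
    assert allclose_sec((t1 - t0).sec, 86401.0)
    assert not allclose_jd((t0 + TimeDelta(1., format='jd')).jd, t1.jd)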
def test_timedelta_setitem():
t = TimeDelta([1, 2, 3] * u.d, format='jd')
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 86400 * u.s
assert allclose_jd(t.value, [1, 1, 1])
t[1] = TimeDelta(2, format='jd')
assert allclose_jd(t.value, [1, 2, 1])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert 'cannot convert value to a compatible TimeDelta' in str(err)
def test_timedelta_setitem_sec():
t = TimeDelta([1, 2, 3], format='sec')
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 1 * u.day
assert allclose_jd(t.value, [86400, 86400, 86400])
t[1] = TimeDelta(2, format='jd')
assert allclose_jd(t.value, [86400, 86400 * 2, 86400])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert 'cannot convert value to a compatible TimeDelta' in str(err)
def test_timedelta_mask():
t = TimeDelta([1, 2] * u.d, format='jd')
t[1] = np.ma.masked
assert np.all(t.mask == [False, True])
assert allclose_jd(t[0].value, 1)
assert t.value[1] is np.ma.masked
def test_python_timedelta_scalar():
td = timedelta(days=1, seconds=1)
td1 = TimeDelta(td, format='datetime')
assert td1.sec == 86401.0
td2 = TimeDelta(86401.0, format='sec')
assert td2.datetime == td
def test_python_timedelta_vector():
td = [[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)]]
td1 = TimeDelta(td, format='datetime')
assert np.all(td1.jd == [[1, 2], [3, 4]])
td2 = TimeDelta([[1, 2], [3, 4]], format='jd')
assert np.all(td2.datetime == td)
def test_timedelta_to_datetime():
td = TimeDelta(1, format='jd')
assert td.to_datetime() == timedelta(days=1)
td2 = TimeDelta([[1, 2], [3, 4]], format='jd')
td = [[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)]]
assert np.all(td2.to_datetime() == td)
def test_insert_timedelta():
tm = TimeDelta([1, 2], format='sec')
    # Insert a two-element TimeDelta array at index 1
tm2 = tm.insert(1, TimeDelta([10, 20], format='sec'))
assert np.all(tm2 == TimeDelta([1, 10, 20, 2], format='sec'))
|
14d44cfba60dde3b7cbdb96058a4725f66ef2bfdcf3c63c8964928ee2e9106bc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import functools
import datetime
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import catch_warnings, pytest
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils import isiterable
from astropy.time import Time, ScaleValueError, STANDARD_TIME_SCALES, TimeString, TimezoneInfo
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy import _erfa as erfa
from astropy.table import Column
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
allclose_year = functools.partial(np.allclose, rtol=2. ** -52,
atol=0.) # 14 microsec at current epoch
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic():
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+0.00037179926839122024,
-0.5+0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
        # (internal jd1 and jd2 are now with respect to the TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.)/10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5 = t4[3]
assert t5.location == t4.location[3]
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.500000'
assert t.ut1.iso == '2006-01-15 21:24:37.834100'
assert t.tai.iso == '2006-01-15 21:25:10.500000'
assert t.tt.iso == '2006-01-15 21:25:42.684000'
assert t.tcg.iso == '2006-01-15 21:25:43.322690'
assert t.tdb.iso == '2006-01-15 21:25:42.684373'
assert t.tcb.iso == '2006-01-15 21:25:56.893952'
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5']*3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
        dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to formats with a reference epoch should raise
        ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001/3600./24./365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001/3600./24./365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001/3600./24./365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
        # epoch time formats require a reference time scale
with pytest.raises(ScaleValueError):
t2 = t.gps
with pytest.raises(ScaleValueError):
t2 = t.unix
with pytest.raises(ScaleValueError):
t2 = t.cxcsec
with pytest.raises(ScaleValueError):
t2 = t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = '{:04d}-{:02d}'.format(year, month)
yyyy_mm_dd = '{:04d}-{:02d}-{:02d}'.format(year, month, day)
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = '{:04d}-07-01'.format(year)
else:
yyyy_mm_dd_plus1 = '{:04d}-01-01'.format(year+1)
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
t6 = Time(t1, scale='local')
class TestVal2():
"""Tests related to val2"""
def test_val2_ignored(self):
"""Test that val2 is ignored for string input"""
t = Time('2001:001', 'ignored', scale='utc')
assert t.yday == '2001:001:00:00:00.000'
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
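    def test_val2_sum_sketch(self):
        # Illustrative sketch (added; not part of the original module): val
        # and val2 are summed internally, so a Julian Date can be split across
        # the two inputs (cf. Time(2400000.5, 51544.0333981, format='jd') in
        # test_creating_all_formats above).
        t = Time(2450000.0, 0.5, format='jd', scale='tai')
        assert allclose_jd(t.jd, 2450000.5)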
class TestSubFormat():
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with catch_warnings(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Value from:
# d = datetime.datetime(2000, 1, 1)
# matplotlib.pylab.dates.date2num(d)
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, 730120.0, atol=1e-5, rtol=0)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
class TestSofaErrors():
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with catch_warnings() as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert 'bad day (JD computed)' in str(w[0].message)
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate():
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x  # prove that it did not change
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1./24./3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion():
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
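# Note (added for clarity): merely defining a TimeString subclass with a
# ``name`` attribute registers it in Time.FORMATS via the format metaclass;
# that is why setup_function and teardown_function at the top of this module
# snapshot and restore Time.FORMATS around every test.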
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10*u.hour, tzname='US/Hawaii')
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError) as e:
Time('2015-06-30 23:59:60.000').to_datetime()
    assert 'does not support leap seconds' in str(e.value)
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[['{:04d}-{:02d}-{:02d}'.format(y, m, d) for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is not writeable
t = Time('2000:001', scale='utc')
with pytest.raises(ValueError) as err:
t[()] = '2000:002'
assert 'scalar Time object is read-only.' in str(err)
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err)
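# Usage sketch (an assumption based on the error message above): copy()
# returns an independent, writable Time, so a cached transform can be
# edited via a copy.
def _copy_is_writable_sketch():
    t = Time(['2000:001', '2000:002'], scale='utc')
    t2 = t.tt.copy()
    t2[0] = '2005:001'  # allowed on the copy, unlike on t.tt itself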
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
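# Aside (hedged): Time's default precision is 3 digits of fractional
# seconds, so the precision=9 above is what lets the nanosecond part of
# the datetime64 survive into the ISO string.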
|
1cf7ff22af7ba13814615758fc7cba809f44e9fdf69a2ee02d8b7972c6b97f90 | import functools
import pytest
import numpy as np
from astropy.time import Time, TimeDelta
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
dt_tiny = TimeDelta(2. ** -52, format='jd')
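# Worked check of the tolerances above (a sketch): one part in 2**52 of
# a day is 86400 * 2.**-52 ~= 1.92e-11 s, i.e. the "20 ps" quoted in the
# comments, so each atol targets roughly one bit at the day scale.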
def test_addition():
"""Check that an addition at the limit of precision (2^-52) is seen"""
t = Time(2455555., 0.5, format='jd', scale='utc')
t_dt = t + dt_tiny
assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2
# Check that the addition is exactly reversed by the corresponding subtraction
t2 = t_dt - dt_tiny
assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
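# Minimal sketch (local names, not part of the suite) of why two doubles
# preserve the offset above: a 2**-52 day shift vanishes when added to
# jd1 + jd2 collapsed into a single float64, but survives in the second
# part of the pair.
def _two_part_sketch():
    jd1, jd2 = 2455555.0, 0.5
    tiny = 2. ** -52
    assert (jd1 + jd2) + tiny == jd1 + jd2  # lost in one float64
    assert jd2 + tiny != jd2  # kept in the fraction part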
def test_mult_div():
"""Test precision with multiply and divide"""
dt_small = 6 * dt_tiny
    # Pick a number that leaves a remainder when divided by 6.
dt_big = TimeDelta(20000., format='jd')
dt_big_small_by_6 = (dt_big + dt_small) / 6.
dt_frac = dt_big_small_by_6 - TimeDelta(3333., format='jd')
assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
def test_init_variations():
"""Check that 3 ways of specifying a time + small offset are equivalent"""
dt_tiny_sec = dt_tiny.jd2 * 86400.
t1 = Time(1e11, format='cxcsec') + dt_tiny
t2 = Time(1e11, dt_tiny_sec, format='cxcsec')
t3 = Time(dt_tiny_sec, 1e11, format='cxcsec')
assert t1.jd1 == t2.jd1
assert t1.jd2 == t3.jd2
def test_precision_exceeds_64bit():
"""
Check that Time object really holds more precision than float64 by looking at the
(naively) summed 64-bit result and asserting equality at the bit level.
"""
t1 = Time(1.23456789e11, format='cxcsec')
t2 = t1 + dt_tiny
assert t1.jd == t2.jd
def test_through_scale_change():
"""Check that precision holds through scale change (cxcsec is TT)"""
t0 = Time(1.0, format='cxcsec')
t1 = Time(1.23456789e11, format='cxcsec')
dt_tt = t1 - t0
dt_tai = t1.tai - t0.tai
assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
def test_iso_init():
"""Check when initializing from ISO date"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
t2 = Time('3000:001:13:00:00.00000002', scale='tai')
dt = t2 - t1
assert allclose_jd2(dt.jd2, 13. / 24. + 1e-8 / 86400. - 1.0)
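# Worked check of the expected value above: t2 - t1 is a whole number of
# days plus 13 h and 1e-8 s; with jd2 normalized to [-0.5, 0.5), the
# fractional part comes out as 13/24 + 1e-8/86400 - 1.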
def test_jd1_is_mult_of_half_or_one():
"""
Check that jd1 is a multiple of 0.5 (note the difference from when Time is created
with a format like 'jd' or 'cxcsec', where jd1 is a multiple of 1.0).
"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
assert np.round(t1.jd1 * 2) == t1.jd1 * 2
t1 = Time(1.23456789, 12345678.90123456, format='jd', scale='tai')
assert np.round(t1.jd1) == t1.jd1
@pytest.mark.xfail
def test_precision_neg():
"""
Check precision when jd1 is negative. Currently fails because ERFA routines use a
test like jd1 > jd2 to decide which component to update. Should be
abs(jd1) > abs(jd2).
"""
t1 = Time(-100000.123456, format='jd', scale='tt')
assert np.round(t1.jd1) == t1.jd1
t1_tai = t1.tai
assert np.round(t1_tai.jd1) == t1_tai.jd1
def test_precision_epoch():
"""
Check that input via epoch also has full precision, i.e., against
regression on https://github.com/astropy/astropy/pull/366
"""
t_utc = Time(range(1980, 2001), format='jyear', scale='utc')
t_tai = Time(range(1980, 2001), format='jyear', scale='tai')
dt = t_utc - t_tai
assert allclose_sec(dt.sec, np.round(dt.sec))
def test_leap_seconds_rounded_correctly():
"""Regression tests against #2083, where a leap second was rounded
incorrectly by the underlying ERFA routine."""
t = Time(['2012-06-30 23:59:59.413',
'2012-07-01 00:00:00.413'], scale='ut1', precision=3).utc
assert np.all(t.iso == np.array(['2012-06-30 23:59:60.000',
'2012-07-01 00:00:00.000']))
# with the bug, both yielded '2012-06-30 23:59:60.000'
|
5cb8f0a786df9d52bd387a3eddb31fd4fbb2dcd5ce4ab527ea26f500afc7dde8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord, solar_system_ephemeris
from astropy.time import Time, TimeDelta
try:
import jplephem # pylint: disable=W0611
except ImportError:
HAS_JPLEPHEM = False
else:
HAS_JPLEPHEM = True
class TestHelioBaryCentric():
"""
Verify time offsets to the solar system barycentre and the heliocentre.
Uses the WHT observing site.
    Tests are against values returned when these routines were first
    written; they agree with an independent SLALIB-based implementation
    to within 20 microseconds.
"""
def setup(self):
wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m)
self.obstime = Time("2013-02-02T23:00", location=wht)
self.obstime2 = Time("2013-08-02T23:00", location=wht)
self.obstimeArr = Time(["2013-02-02T23:00", "2013-08-02T23:00"], location=wht)
self.star = SkyCoord("08:08:08 +32:00:00", unit=(u.hour, u.degree),
frame='icrs')
def test_heliocentric(self):
hval = self.obstime.light_travel_time(self.star, 'heliocentric')
assert isinstance(hval, TimeDelta)
assert hval.scale == 'tdb'
assert abs(hval - 461.43037870502235 * u.s) < 1. * u.us
def test_barycentric(self):
bval = self.obstime.light_travel_time(self.star, 'barycentric')
assert isinstance(bval, TimeDelta)
assert bval.scale == 'tdb'
assert abs(bval - 460.58538779827836 * u.s) < 1. * u.us
def test_arrays(self):
bval1 = self.obstime.light_travel_time(self.star, 'barycentric')
bval2 = self.obstime2.light_travel_time(self.star, 'barycentric')
bval_arr = self.obstimeArr.light_travel_time(self.star, 'barycentric')
hval1 = self.obstime.light_travel_time(self.star, 'heliocentric')
hval2 = self.obstime2.light_travel_time(self.star, 'heliocentric')
hval_arr = self.obstimeArr.light_travel_time(self.star, 'heliocentric')
assert hval_arr[0]-hval1 < 1. * u.us
assert hval_arr[1]-hval2 < 1. * u.us
assert bval_arr[0]-bval1 < 1. * u.us
assert bval_arr[1]-bval2 < 1. * u.us
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_ephemerides(self):
bval1 = self.obstime.light_travel_time(self.star, 'barycentric')
with solar_system_ephemeris.set('jpl'):
bval2 = self.obstime.light_travel_time(self.star, 'barycentric', ephemeris='jpl')
# should differ by less than 0.1 ms, but not be the same
assert abs(bval1 - bval2) < 1. * u.ms
assert abs(bval1 - bval2) > 1. * u.us
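# Usage sketch of the API exercised above (an illustration, not a test):
# light_travel_time returns a TimeDelta on the 'tdb' scale, typically
# added to obstime.tdb to get the barycentric arrival time:
#     ltt = obstime.light_travel_time(star, 'barycentric')
#     t_bary = obstime.tdb + ltt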
|
800c2b2f7a9fcb3a52627f80b7f5f181169e17585ab4bee523d38853d78074d6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import numpy as np
from astropy.time import Time
class TestPickle():
"""Basic pickle test of time"""
def test_pickle(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t1 = Time(times, scale='utc')
        for prot in range(pickle.HIGHEST_PROTOCOL + 1):
t1d = pickle.dumps(t1, prot)
t1l = pickle.loads(t1d)
assert np.all(t1l == t1)
t2 = Time('2012-06-30 12:00:00', scale='utc')
        for prot in range(pickle.HIGHEST_PROTOCOL + 1):
t2d = pickle.dumps(t2, prot)
t2l = pickle.loads(t2d)
assert t2l == t2
|
a06e795a075fe81fd5489f0471ecbcd783fd4e71681ec90e3a1624837a03fc4e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import pytest
import numpy as np
from astropy.time import Time
from astropy.utils.iers import iers # used in testing
allclose_jd = functools.partial(np.allclose, rtol=0, atol=1e-9)
allclose_sec = functools.partial(np.allclose, rtol=1e-15, atol=1e-4)
# 0.1 ms atol; IERS-B files change at that level.
try:
iers.IERS_A.open() # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
class TestTimeUT1():
"""Test Time.ut1 using IERS tables"""
@pytest.mark.remote_data
def test_utc_to_ut1(self):
"Test conversion of UTC to UT1, making sure to include a leap second"""
t = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-06-30 23:59:60', '2012-07-01 00:00:00',
'2012-07-01 12:00:00'], scale='utc')
t_ut1_jd = t.ut1.jd
t_comp = np.array([2456108.9999932079,
2456109.4999816339,
2456109.4999932083,
2456109.5000047823,
2456110.0000047833])
assert allclose_jd(t_ut1_jd, t_comp)
t_back = t.ut1.utc
assert allclose_jd(t.jd, t_back.jd)
        # Also verify that a current time (requiring IERS data) can be
        # transformed to UT1.
        tnow = Time.now()
        tnow.ut1
def test_ut1_to_utc(self):
"""Also test the reverse, around the leap second
(round-trip test closes #2077)"""
t = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-07-01 00:00:00', '2012-07-01 00:00:01',
'2012-07-01 12:00:00'], scale='ut1')
t_utc_jd = t.utc.jd
t_comp = np.array([2456109.0000010049,
2456109.4999836441,
2456109.4999952177,
2456109.5000067917,
2456109.9999952167])
assert allclose_jd(t_utc_jd, t_comp)
t_back = t.utc.ut1
assert allclose_jd(t.jd, t_back.jd)
def test_delta_ut1_utc(self):
"""Accessing delta_ut1_utc should try to get it from IERS
(closes #1924 partially)"""
t = Time('2012-06-30 12:00:00', scale='utc')
assert not hasattr(t, '_delta_ut1_utc')
# accessing delta_ut1_utc calculates it
assert allclose_sec(t.delta_ut1_utc, -0.58682110003124965)
# and keeps it around
assert allclose_sec(t._delta_ut1_utc, -0.58682110003124965)
@pytest.mark.skipif('not HAS_IERS_A')
class TestTimeUT1_IERSA():
def test_ut1_iers_A(self):
tnow = Time.now()
iers_a = iers.IERS_A.open()
tnow.delta_ut1_utc, status = iers_a.ut1_utc(tnow, return_status=True)
assert status == iers.FROM_IERS_A_PREDICTION
tnow_ut1_jd = tnow.ut1.jd
assert tnow_ut1_jd != tnow.jd
@pytest.mark.remote_data
class TestTimeUT1_IERS_Auto():
def test_ut1_iers_auto(self):
tnow = Time.now()
iers_a = iers.IERS_Auto.open()
tnow.delta_ut1_utc, status = iers_a.ut1_utc(tnow, return_status=True)
assert status == iers.FROM_IERS_A_PREDICTION
tnow_ut1_jd = tnow.ut1.jd
assert tnow_ut1_jd != tnow.jd
|
f4813650f6d063770784107b2ff8155dc7ab4bfd7cfd9ed1c31dad7d320857e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.time import Time
class TestGuess():
"""Test guessing the input value format"""
def test_guess1(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
def test_guess2(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01 00:00:00']
with pytest.raises(ValueError):
Time(times, scale='utc')
def test_guess3(self):
times = ['1999:001:00:00:00.123456789', '2010:001']
t = Time(times, scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='yday' "
"value=['1999:001:00:00:00.123' '2010:001:00:00:00.000']>")
def test_guess4(self):
times = [10, 20]
with pytest.raises(ValueError):
Time(times, scale='utc')
|
b6c89913acd659d5bd6e2391acaab86dec2c2261a4619208aca0193e7c776cac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import pytest
import numpy as np
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.table import Column
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
class TestTimeQuantity():
"""Test Interaction of Time with Quantities"""
def test_valid_quantity_input(self):
"""Test Time formats that are allowed to take quantity input."""
q = 2450000.125*u.day
t1 = Time(q, format='jd', scale='utc')
assert t1.value == q.value
q2 = q.to(u.second)
t2 = Time(q2, format='jd', scale='utc')
assert t2.value == q.value == q2.to_value(u.day)
q3 = q-2400000.5*u.day
t3 = Time(q3, format='mjd', scale='utc')
assert t3.value == q3.value
# test we can deal with two quantity arguments, with different units
qs = 24.*36.*u.second
t4 = Time(q3, qs, format='mjd', scale='utc')
assert t4.value == (q3+qs).to_value(u.day)
qy = 1990.*u.yr
ty1 = Time(qy, format='jyear', scale='utc')
assert ty1.value == qy.value
ty2 = Time(qy.to(u.day), format='jyear', scale='utc')
assert ty2.value == qy.value
qy2 = 10.*u.yr
tcxc = Time(qy2, format='cxcsec')
assert tcxc.value == qy2.to_value(u.second)
tgps = Time(qy2, format='gps')
assert tgps.value == qy2.to_value(u.second)
tunix = Time(qy2, format='unix')
assert tunix.value == qy2.to_value(u.second)
qd = 2000.*365.*u.day
tplt = Time(qd, format='plot_date', scale='utc')
assert tplt.value == qd.value
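    # Aside (hedged): each format above advertises a natural unit (day
    # for jd/mjd/plot_date, s for cxcsec/gps/unix, yr for jyear), so any
    # Quantity convertible to that unit is accepted, e.g.
    # Time(86400. * u.s, format='jd', scale='utc').value == 1.0.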
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
Time(2450000.*u.m, format='jd', scale='utc')
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
def test_column_with_and_without_units(self):
"""Ensure a Column without a unit is treated as an array [#3648]"""
a = np.arange(50000., 50010.)
ta = Time(a, format='mjd')
c1 = Column(np.arange(50000., 50010.), name='mjd')
tc1 = Time(c1, format='mjd')
assert np.all(ta == tc1)
c2 = Column(np.arange(50000., 50010.), name='mjd', unit='day')
tc2 = Time(c2, format='mjd')
assert np.all(ta == tc2)
c3 = Column(np.arange(50000., 50010.), name='mjd', unit='m')
with pytest.raises(u.UnitsError):
Time(c3, format='mjd')
def test_no_quantity_input_allowed(self):
"""Time formats that are not allowed to take Quantity input."""
qy = 1990.*u.yr
for fmt in ('iso', 'yday', 'datetime', 'byear',
'byear_str', 'jyear_str'):
with pytest.raises(ValueError):
Time(qy, format=fmt, scale='utc')
def test_valid_quantity_operations(self):
"""Check that adding a time-valued quantity to a Time gives a Time"""
t0 = Time(100000., format='cxcsec')
q1 = 10.*u.second
t1 = t0 + q1
assert isinstance(t1, Time)
assert t1.value == t0.value+q1.to_value(u.second)
q2 = 1.*u.day
t2 = t0 - q2
assert allclose_sec(t2.value, t0.value-q2.to_value(u.second))
# check broadcasting
q3 = np.arange(15.).reshape(3, 5) * u.hour
t3 = t0 - q3
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value-q3.to_value(u.second))
def test_invalid_quantity_operations(self):
"""Check that comparisons of Time with quantities does not work
(even for time-like, since we cannot compare Time to TimeDelta)"""
with pytest.raises(TypeError):
Time(100000., format='cxcsec') > 10.*u.m
with pytest.raises(TypeError):
Time(100000., format='cxcsec') > 10.*u.second
class TestTimeDeltaQuantity():
"""Test interaction of TimeDelta with Quantities"""
def test_valid_quantity_input(self):
"""Test that TimeDelta can take quantity input."""
q = 500.25*u.day
dt1 = TimeDelta(q, format='jd')
assert dt1.value == q.value
dt2 = TimeDelta(q, format='sec')
assert dt2.value == q.to_value(u.second)
dt3 = TimeDelta(q)
assert dt3.value == q.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
TimeDelta(2450000.*u.m, format='jd')
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
with pytest.raises(TypeError):
TimeDelta(100, format='sec') > 10.*u.m
def test_quantity_output(self):
q = 500.25*u.day
dt = TimeDelta(q)
assert dt.to(u.day) == q
assert dt.to(u.second).value == q.to_value(u.second)
with pytest.raises(u.UnitsError):
dt.to(u.m)
def test_valid_quantity_operations1(self):
"""Check adding/substracting/comparing a time-valued quantity works
with a TimeDelta. Addition/subtraction should give TimeDelta"""
t0 = TimeDelta(106400., format='sec')
q1 = 10.*u.second
t1 = t0 + q1
assert isinstance(t1, TimeDelta)
assert t1.value == t0.value+q1.to_value(u.second)
q2 = 1.*u.day
t2 = t0 - q2
assert isinstance(t2, TimeDelta)
assert allclose_sec(t2.value, t0.value-q2.to_value(u.second))
# now comparisons
assert t0 > q1
assert t0 < 1.*u.yr
# and broadcasting
q3 = np.arange(12.).reshape(4, 3) * u.hour
t3 = t0 + q3
assert isinstance(t3, TimeDelta)
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))
def test_valid_quantity_operations2(self):
"""Check that TimeDelta is treated as a quantity where possible."""
t0 = TimeDelta(100000., format='sec')
f = 1./t0
assert isinstance(f, u.Quantity)
assert f.unit == 1./u.day
g = 10.*u.m/u.second**2
v = t0 * g
assert isinstance(v, u.Quantity)
assert u.allclose(v, t0.sec * g.value * u.m / u.second)
q = np.log10(t0/u.second)
assert isinstance(q, u.Quantity)
assert q.value == np.log10(t0.sec)
s = 1.*u.m
v = s/t0
assert isinstance(v, u.Quantity)
assert u.allclose(v, 1. / t0.sec * u.m / u.s)
t = 1.*u.s
t2 = t0 * t
assert isinstance(t2, u.Quantity)
assert u.allclose(t2, t0.sec * u.s ** 2)
t3 = [1] / t0
assert isinstance(t3, u.Quantity)
assert u.allclose(t3, 1 / (t0.sec * u.s))
# broadcasting
t1 = TimeDelta(np.arange(100000., 100012.).reshape(6, 2), format='sec')
f = np.array([1., 2.]) * u.cycle * u.Hz
phase = f * t1
assert isinstance(phase, u.Quantity)
assert phase.shape == t1.shape
assert u.allclose(phase, t1.sec * f.value * u.cycle)
q = t0 * t1
assert isinstance(q, u.Quantity)
assert np.all(q == t0.to(u.day) * t1.to(u.day))
q = t1 / t0
assert isinstance(q, u.Quantity)
assert np.all(q == t1.to(u.day) / t0.to(u.day))
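    # Aside: once an operation no longer represents a pure time, the
    # result falls out of TimeDelta into Quantity, with the TimeDelta
    # treated as a value in days (hence the 1. / u.day unit above).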
def test_valid_quantity_operations3(self):
"""Test a TimeDelta remains one if possible."""
t0 = TimeDelta(10., format='jd')
q = 10. * u.one
t1 = q * t0
assert isinstance(t1, TimeDelta)
assert t1 == TimeDelta(100., format='jd')
t2 = t0 * q
assert isinstance(t2, TimeDelta)
assert t2 == TimeDelta(100., format='jd')
t3 = t0 / q
assert isinstance(t3, TimeDelta)
assert t3 == TimeDelta(1., format='jd')
q2 = 1. * u.percent
t4 = t0 * q2
assert isinstance(t4, TimeDelta)
assert abs(t4 - TimeDelta(0.1, format='jd')) < 1. * u.ns
q3 = 1. * u.hr / (36. * u.s)
t5 = q3 * t0
        assert isinstance(t5, TimeDelta)
assert abs(t5 - TimeDelta(1000., format='jd')) < 1. * u.ns
# Test multiplication with a unit.
t6 = t0 * u.one
assert isinstance(t6, TimeDelta)
assert t6 == TimeDelta(10., format='jd')
t7 = u.one * t0
assert isinstance(t7, TimeDelta)
assert t7 == TimeDelta(10., format='jd')
t8 = t0 * ''
assert isinstance(t8, TimeDelta)
assert t8 == TimeDelta(10., format='jd')
t9 = '' * t0
assert isinstance(t9, TimeDelta)
assert t9 == TimeDelta(10., format='jd')
t10 = t0 / u.one
assert isinstance(t10, TimeDelta)
        assert t10 == TimeDelta(10., format='jd')
t11 = t0 / ''
assert isinstance(t11, TimeDelta)
assert t11 == TimeDelta(10., format='jd')
t12 = t0 / [1]
assert isinstance(t12, TimeDelta)
assert t12 == TimeDelta(10., format='jd')
t13 = [1] * t0
assert isinstance(t13, TimeDelta)
assert t13 == TimeDelta(10., format='jd')
def test_invalid_quantity_operations(self):
"""Check comparisons of TimeDelta with non-time quantities fails."""
with pytest.raises(TypeError):
TimeDelta(100000., format='sec') > 10.*u.m
def test_invalid_quantity_operations2(self):
"""Check that operations with non-time/quantity fail."""
td = TimeDelta(100000., format='sec')
with pytest.raises(TypeError):
td * object()
with pytest.raises(TypeError):
td / object()
def test_invalid_quantity_broadcast(self):
"""Check broadcasting rules in interactions with Quantity."""
t0 = TimeDelta(np.arange(12.).reshape(4, 3), format='sec')
with pytest.raises(ValueError):
t0 + np.arange(4.) * u.s
class TestDeltaAttributes():
def test_delta_ut1_utc(self):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc', precision=6)
t.delta_ut1_utc = 0.3 * u.s
assert t.ut1.iso == '2010-01-01 00:00:00.300000'
t.delta_ut1_utc = 0.4 / 60. * u.minute
assert t.ut1.iso == '2010-01-01 00:00:00.400000'
with pytest.raises(u.UnitsError):
t.delta_ut1_utc = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_ut1_utc = TimeDelta(0.3, format='sec')
assert t.ut1.iso == '2010-01-01 00:00:00.300000'
t.delta_ut1_utc = TimeDelta(0.5/24./3600., format='jd')
assert t.ut1.iso == '2010-01-01 00:00:00.500000'
def test_delta_tdb_tt(self):
t = Time('2010-01-01 00:00:00', format='iso', scale='tt', precision=6)
t.delta_tdb_tt = 20. * u.second
assert t.tdb.iso == '2010-01-01 00:00:20.000000'
t.delta_tdb_tt = 30. / 60. * u.minute
assert t.tdb.iso == '2010-01-01 00:00:30.000000'
with pytest.raises(u.UnitsError):
t.delta_tdb_tt = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_tdb_tt = TimeDelta(40., format='sec')
assert t.tdb.iso == '2010-01-01 00:00:40.000000'
t.delta_tdb_tt = TimeDelta(50./24./3600., format='jd')
assert t.tdb.iso == '2010-01-01 00:00:50.000000'
@pytest.mark.parametrize('q1, q2', ((5e8*u.s, None),
(5e17*u.ns, None),
(4e8*u.s, 1e17*u.ns),
(4e14*u.us, 1e17*u.ns)))
def test_quantity_conversion_rounding(q1, q2):
"""Check that no rounding errors are incurred by unit conversion.
    Rounding previously occurred because quantities in seconds were
    converted to days before being split into two-part doubles. See gh-7622.
"""
t = Time('2001-01-01T00:00:00.', scale='tai')
expected = Time('2016-11-05T00:53:20.', scale='tai')
if q2 is None:
t0 = t + q1
else:
t0 = t + q1 + q2
assert abs(t0 - expected) < 20 * u.ps
dt1 = TimeDelta(q1, q2)
t1 = t + dt1
assert abs(t1 - expected) < 20 * u.ps
dt2 = TimeDelta(q1, q2, format='sec')
t2 = t + dt2
assert abs(t2 - expected) < 20 * u.ps
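# Worked magnitude check for the tolerance above (a sketch): 5e8 s is
# about 5787 days, where one float64 ULP is ~9e-13 day ~ 80 ns, far
# coarser than 20 ps; only the two-part (jd1, jd2) split can hold it.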
|
4c2de17d95968cb2325343e62bd958b9ad01b2ea0e014d07754aec9c5d3b1ef0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import pytest
import numpy as np
from astropy.time import Time, TimeDelta
class TestTimeComparisons():
"""Test Comparisons of Time and TimeDelta classes"""
def setup(self):
self.t1 = Time(np.arange(49995, 50005), format='mjd', scale='utc')
self.t2 = Time(np.arange(49000, 51000, 200), format='mjd', scale='utc')
def test_miscompares(self):
"""
If an incompatible object is compared to a Time object, == should
return False and != should return True. All other comparison
operators should raise a TypeError.
"""
t1 = Time('J2000', scale='utc')
for op, op_str in ((operator.ge, '>='),
(operator.gt, '>'),
(operator.le, '<='),
(operator.lt, '<')):
with pytest.raises(TypeError) as err:
op(t1, None)
# Keep == and != as they are specifically meant to test Time.__eq__
# and Time.__ne__
assert (t1 == None) is False # nopep8
assert (t1 != None) is True # nopep8
def test_time(self):
t1_lt_t2 = self.t1 < self.t2
assert np.all(t1_lt_t2 == np.array([False, False, False, False, False,
False, True, True, True, True]))
t1_ge_t2 = self.t1 >= self.t2
assert np.all(t1_ge_t2 != t1_lt_t2)
t1_le_t2 = self.t1 <= self.t2
assert np.all(t1_le_t2 == np.array([False, False, False, False, False,
True, True, True, True, True]))
t1_gt_t2 = self.t1 > self.t2
assert np.all(t1_gt_t2 != t1_le_t2)
t1_eq_t2 = self.t1 == self.t2
assert np.all(t1_eq_t2 == np.array([False, False, False, False, False,
True, False, False, False, False]))
t1_ne_t2 = self.t1 != self.t2
assert np.all(t1_ne_t2 != t1_eq_t2)
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
t1_0_gt_t2 = self.t1[0] > self.t2
assert np.all(t1_0_gt_t2 == np.array([True, True, True, True, True,
False, False, False, False,
False]))
t1_gt_t2_0 = self.t1 > self.t2[0]
assert np.all(t1_gt_t2_0 == np.array([True, True, True, True, True,
True, True, True, True, True]))
def test_timedelta(self):
dt = self.t2 - self.t1
with pytest.raises(TypeError):
self.t1 > dt
dt_gt_td0 = dt > TimeDelta(0., format='sec')
assert np.all(dt_gt_td0 == np.array([False, False, False, False, False,
False, True, True, True, True]))
|
6a02adfee81ea4866430f39655c0912dd5836e4d7b8efafd73cb4d15d7f10b93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import copy
import pytest
import numpy as np
from astropy.time import Time
@pytest.fixture(scope="module", params=[True, False])
def masked(request):
# Could not figure out a better way to parametrize the setup method
global use_masked_data
use_masked_data = request.param
yield use_masked_data
class TestManipulation():
"""Manipulation of Time objects, ensuring attributes are done correctly."""
def setup(self):
mjd = np.arange(50000, 50010)
frac = np.arange(0., 0.999, 0.2)
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t0 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
self.t1 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
        # Note: location is along last axis only.
        self.t2 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
                       location=(np.arange(len(frac)), np.arange(len(frac))))
def test_ravel(self, masked):
t0_ravel = self.t0.ravel()
assert t0_ravel.shape == (self.t0.size,)
assert np.all(t0_ravel.jd1 == self.t0.jd1.ravel())
assert np.may_share_memory(t0_ravel.jd1, self.t0.jd1)
assert t0_ravel.location is None
t1_ravel = self.t1.ravel()
assert t1_ravel.shape == (self.t1.size,)
assert np.all(t1_ravel.jd1 == self.t1.jd1.ravel())
assert np.may_share_memory(t1_ravel.jd1, self.t1.jd1)
assert t1_ravel.location is self.t1.location
t2_ravel = self.t2.ravel()
assert t2_ravel.shape == (self.t2.size,)
assert np.all(t2_ravel.jd1 == self.t2.jd1.ravel())
assert np.may_share_memory(t2_ravel.jd1, self.t2.jd1)
assert t2_ravel.location.shape == t2_ravel.shape
# Broadcasting and ravelling cannot be done without a copy.
assert not np.may_share_memory(t2_ravel.location, self.t2.location)
def test_flatten(self, masked):
t0_flatten = self.t0.flatten()
assert t0_flatten.shape == (self.t0.size,)
assert t0_flatten.location is None
# Flatten always makes a copy.
assert not np.may_share_memory(t0_flatten.jd1, self.t0.jd1)
t1_flatten = self.t1.flatten()
assert t1_flatten.shape == (self.t1.size,)
assert not np.may_share_memory(t1_flatten.jd1, self.t1.jd1)
assert t1_flatten.location is not self.t1.location
assert t1_flatten.location == self.t1.location
t2_flatten = self.t2.flatten()
assert t2_flatten.shape == (self.t2.size,)
assert not np.may_share_memory(t2_flatten.jd1, self.t2.jd1)
assert t2_flatten.location.shape == t2_flatten.shape
assert not np.may_share_memory(t2_flatten.location, self.t2.location)
def test_transpose(self, masked):
t0_transpose = self.t0.transpose()
assert t0_transpose.shape == (5, 10)
assert np.all(t0_transpose.jd1 == self.t0.jd1.transpose())
assert np.may_share_memory(t0_transpose.jd1, self.t0.jd1)
assert t0_transpose.location is None
t1_transpose = self.t1.transpose()
assert t1_transpose.shape == (5, 10)
assert np.all(t1_transpose.jd1 == self.t1.jd1.transpose())
assert np.may_share_memory(t1_transpose.jd1, self.t1.jd1)
assert t1_transpose.location is self.t1.location
t2_transpose = self.t2.transpose()
assert t2_transpose.shape == (5, 10)
assert np.all(t2_transpose.jd1 == self.t2.jd1.transpose())
assert np.may_share_memory(t2_transpose.jd1, self.t2.jd1)
assert t2_transpose.location.shape == t2_transpose.shape
assert np.may_share_memory(t2_transpose.location, self.t2.location)
# Only one check on T, since it just calls transpose anyway.
t2_T = self.t2.T
assert t2_T.shape == (5, 10)
assert np.all(t2_T.jd1 == self.t2.jd1.T)
assert np.may_share_memory(t2_T.jd1, self.t2.jd1)
        assert t2_T.location.shape == t2_T.shape
assert np.may_share_memory(t2_T.location, self.t2.location)
def test_diagonal(self, masked):
t0_diagonal = self.t0.diagonal()
assert t0_diagonal.shape == (5,)
assert np.all(t0_diagonal.jd1 == self.t0.jd1.diagonal())
assert t0_diagonal.location is None
assert np.may_share_memory(t0_diagonal.jd1, self.t0.jd1)
t1_diagonal = self.t1.diagonal()
assert t1_diagonal.shape == (5,)
assert np.all(t1_diagonal.jd1 == self.t1.jd1.diagonal())
assert t1_diagonal.location is self.t1.location
assert np.may_share_memory(t1_diagonal.jd1, self.t1.jd1)
t2_diagonal = self.t2.diagonal()
assert t2_diagonal.shape == (5,)
assert np.all(t2_diagonal.jd1 == self.t2.jd1.diagonal())
assert t2_diagonal.location.shape == t2_diagonal.shape
assert np.may_share_memory(t2_diagonal.jd1, self.t2.jd1)
assert np.may_share_memory(t2_diagonal.location, self.t2.location)
def test_swapaxes(self, masked):
t0_swapaxes = self.t0.swapaxes(0, 1)
assert t0_swapaxes.shape == (5, 10)
assert np.all(t0_swapaxes.jd1 == self.t0.jd1.swapaxes(0, 1))
assert np.may_share_memory(t0_swapaxes.jd1, self.t0.jd1)
assert t0_swapaxes.location is None
t1_swapaxes = self.t1.swapaxes(0, 1)
assert t1_swapaxes.shape == (5, 10)
assert np.all(t1_swapaxes.jd1 == self.t1.jd1.swapaxes(0, 1))
assert np.may_share_memory(t1_swapaxes.jd1, self.t1.jd1)
assert t1_swapaxes.location is self.t1.location
t2_swapaxes = self.t2.swapaxes(0, 1)
assert t2_swapaxes.shape == (5, 10)
assert np.all(t2_swapaxes.jd1 == self.t2.jd1.swapaxes(0, 1))
assert np.may_share_memory(t2_swapaxes.jd1, self.t2.jd1)
assert t2_swapaxes.location.shape == t2_swapaxes.shape
assert np.may_share_memory(t2_swapaxes.location, self.t2.location)
def test_reshape(self, masked):
t0_reshape = self.t0.reshape(5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert np.may_share_memory(t0_reshape.jd1, self.t0.jd1)
assert np.may_share_memory(t0_reshape.jd2, self.t0.jd2)
assert t0_reshape.location is None
t1_reshape = self.t1.reshape(2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
assert np.may_share_memory(t1_reshape.jd1, self.t1.jd1)
assert t1_reshape.location is self.t1.location
# For reshape(5, 2, 5), the location array can remain the same.
t2_reshape = self.t2.reshape(5, 2, 5)
assert t2_reshape.shape == (5, 2, 5)
assert np.all(t2_reshape.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_reshape.jd1, self.t2.jd1)
assert t2_reshape.location.shape == t2_reshape.shape
assert np.may_share_memory(t2_reshape.location, self.t2.location)
# But for reshape(5, 5, 2), location has to be broadcast and copied.
t2_reshape2 = self.t2.reshape(5, 5, 2)
assert t2_reshape2.shape == (5, 5, 2)
assert np.all(t2_reshape2.jd1 == self.t2.jd1.reshape(5, 5, 2))
assert np.may_share_memory(t2_reshape2.jd1, self.t2.jd1)
assert t2_reshape2.location.shape == t2_reshape2.shape
assert not np.may_share_memory(t2_reshape2.location, self.t2.location)
t2_reshape_t = self.t2.reshape(10, 5).T
assert t2_reshape_t.shape == (5, 10)
assert np.may_share_memory(t2_reshape_t.jd1, self.t2.jd1)
assert t2_reshape_t.location.shape == t2_reshape_t.shape
assert np.may_share_memory(t2_reshape_t.location, self.t2.location)
# Finally, reshape in a way that cannot be a view.
t2_reshape_t_reshape = t2_reshape_t.reshape(10, 5)
assert t2_reshape_t_reshape.shape == (10, 5)
assert not np.may_share_memory(t2_reshape_t_reshape.jd1, self.t2.jd1)
assert (t2_reshape_t_reshape.location.shape ==
t2_reshape_t_reshape.shape)
assert not np.may_share_memory(t2_reshape_t_reshape.location,
t2_reshape_t.location)
def test_shape_setting(self, masked):
t0_reshape = self.t0.copy()
mjd = t0_reshape.mjd # Creates a cache of the mjd attribute
t0_reshape.shape = (5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert mjd.shape != t0_reshape.mjd.shape # Cache got cleared
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert t0_reshape.location is None
# But if the shape doesn't work, one should get an error.
t0_reshape_t = t0_reshape.T
with pytest.raises(AttributeError):
t0_reshape_t.shape = (10, 5)
# check no shape was changed.
assert t0_reshape_t.shape == t0_reshape.T.shape
assert t0_reshape_t.jd1.shape == t0_reshape.T.shape
assert t0_reshape_t.jd2.shape == t0_reshape.T.shape
t1_reshape = self.t1.copy()
t1_reshape.shape = (2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
# location is a single element, so its shape should not change.
assert t1_reshape.location.shape == ()
# For reshape(5, 2, 5), the location array can remain the same.
# Note that we need to work directly on self.t2 here, since any
# copy would cause location to have the full shape.
self.t2.shape = (5, 2, 5)
assert self.t2.shape == (5, 2, 5)
assert self.t2.jd1.shape == (5, 2, 5)
assert self.t2.jd2.shape == (5, 2, 5)
assert self.t2.location.shape == (5, 2, 5)
assert self.t2.location.strides == (0, 0, 24)
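        # (Sketch of the stride check: broadcasting the length-5 location
        # against shape (5, 2, 5) repeats records without copying, so the
        # first two strides are 0 and only the last axis advances by one
        # 24-byte (x, y, z) record; compare
        # np.broadcast_to(np.arange(5.), (5, 2, 5)).strides == (0, 0, 8).)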
# But for reshape(50), location would need to be copied, so this
# should fail.
oldshape = self.t2.shape
with pytest.raises(AttributeError):
self.t2.shape = (50,)
# check no shape was changed.
assert self.t2.jd1.shape == oldshape
assert self.t2.jd2.shape == oldshape
assert self.t2.location.shape == oldshape
# reset t2 to its original.
self.setup()
def test_squeeze(self, masked):
t0_squeeze = self.t0.reshape(5, 1, 2, 1, 5).squeeze()
assert t0_squeeze.shape == (5, 2, 5)
assert np.all(t0_squeeze.jd1 == self.t0.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t0_squeeze.jd1, self.t0.jd1)
assert t0_squeeze.location is None
t1_squeeze = self.t1.reshape(1, 5, 1, 2, 5).squeeze()
assert t1_squeeze.shape == (5, 2, 5)
assert np.all(t1_squeeze.jd1 == self.t1.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t1_squeeze.jd1, self.t1.jd1)
assert t1_squeeze.location is self.t1.location
t2_squeeze = self.t2.reshape(1, 1, 5, 2, 5, 1, 1).squeeze()
assert t2_squeeze.shape == (5, 2, 5)
assert np.all(t2_squeeze.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_squeeze.jd1, self.t2.jd1)
assert t2_squeeze.location.shape == t2_squeeze.shape
assert np.may_share_memory(t2_squeeze.location, self.t2.location)
def test_add_dimension(self, masked):
t0_adddim = self.t0[:, np.newaxis, :]
assert t0_adddim.shape == (10, 1, 5)
assert np.all(t0_adddim.jd1 == self.t0.jd1[:, np.newaxis, :])
assert np.may_share_memory(t0_adddim.jd1, self.t0.jd1)
assert t0_adddim.location is None
t1_adddim = self.t1[:, :, np.newaxis]
assert t1_adddim.shape == (10, 5, 1)
assert np.all(t1_adddim.jd1 == self.t1.jd1[:, :, np.newaxis])
assert np.may_share_memory(t1_adddim.jd1, self.t1.jd1)
assert t1_adddim.location is self.t1.location
t2_adddim = self.t2[:, :, np.newaxis]
assert t2_adddim.shape == (10, 5, 1)
assert np.all(t2_adddim.jd1 == self.t2.jd1[:, :, np.newaxis])
assert np.may_share_memory(t2_adddim.jd1, self.t2.jd1)
assert t2_adddim.location.shape == t2_adddim.shape
assert np.may_share_memory(t2_adddim.location, self.t2.location)
def test_take(self, masked):
t0_take = self.t0.take((5, 2))
assert t0_take.shape == (2,)
assert np.all(t0_take.jd1 == self.t0._time.jd1.take((5, 2)))
assert t0_take.location is None
t1_take = self.t1.take((2, 4), axis=1)
assert t1_take.shape == (10, 2)
assert np.all(t1_take.jd1 == self.t1.jd1.take((2, 4), axis=1))
assert t1_take.location is self.t1.location
t2_take = self.t2.take((1, 3, 7), axis=0)
assert t2_take.shape == (3, 5)
assert np.all(t2_take.jd1 == self.t2.jd1.take((1, 3, 7), axis=0))
assert t2_take.location.shape == t2_take.shape
t2_take2 = self.t2.take((5, 15))
assert t2_take2.shape == (2,)
assert np.all(t2_take2.jd1 == self.t2.jd1.take((5, 15)))
assert t2_take2.location.shape == t2_take2.shape
def test_broadcast(self, masked):
"""Test using a callable method."""
t0_broadcast = self.t0._apply(np.broadcast_to, shape=(3, 10, 5))
assert t0_broadcast.shape == (3, 10, 5)
assert np.all(t0_broadcast.jd1 == self.t0.jd1)
assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
assert t0_broadcast.location is None
t1_broadcast = self.t1._apply(np.broadcast_to, shape=(3, 10, 5))
assert t1_broadcast.shape == (3, 10, 5)
assert np.all(t1_broadcast.jd1 == self.t1.jd1)
assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
assert t1_broadcast.location is self.t1.location
t2_broadcast = self.t2._apply(np.broadcast_to, shape=(3, 10, 5))
assert t2_broadcast.shape == (3, 10, 5)
assert np.all(t2_broadcast.jd1 == self.t2.jd1)
assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
assert t2_broadcast.location.shape == t2_broadcast.shape
assert np.may_share_memory(t2_broadcast.location, self.t2.location)
class TestArithmetic():
"""Arithmetic on Time objects, using both doubles."""
kwargs = ({}, {'axis': None}, {'axis': 0}, {'axis': 1}, {'axis': 2})
functions = ('min', 'max', 'sort')
def setup(self):
mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
frac = np.array([0.1, 0.1+1.e-15, 0.1-1.e-15, 0.9+2.e-16, 0.9])
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t0 = Time(mjd, frac, format='mjd', scale='utc')
# Define arrays with same ordinal properties
frac = np.array([1, 2, 0, 4, 3])
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t1 = Time(mjd + frac, format='mjd', scale='utc')
self.jd = mjd + frac
@pytest.mark.parametrize('kw, func', itertools.product(kwargs, functions))
def test_argfuncs(self, kw, func, masked):
"""
Test that np.argfunc(jd, **kw) is the same as t0.argfunc(**kw) where
jd is a similarly shaped array with the same ordinal properties but
all integer values. Also test the same for t1 which has the same
integral values as jd.
"""
t0v = getattr(self.t0, 'arg' + func)(**kw)
t1v = getattr(self.t1, 'arg' + func)(**kw)
jdv = getattr(np, 'arg' + func)(self.jd, **kw)
if self.t0.masked and kw == {'axis': None} and func == 'sort':
t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])
assert np.all(t0v == jdv)
assert np.all(t1v == jdv)
assert t0v.shape == jdv.shape
assert t1v.shape == jdv.shape
@pytest.mark.parametrize('kw, func', itertools.product(kwargs, functions))
def test_funcs(self, kw, func, masked):
"""
Test that np.func(jd, **kw) is the same as t1.func(**kw) where
jd is a similarly shaped array and the same integral values.
"""
t1v = getattr(self.t1, func)(**kw)
jdv = getattr(np, func)(self.jd, **kw)
assert np.all(t1v.value == jdv)
assert t1v.shape == jdv.shape
def test_argmin(self, masked):
assert self.t0.argmin() == 2
assert np.all(self.t0.argmin(axis=0) == 0)
assert np.all(self.t0.argmin(axis=1) == 0)
assert np.all(self.t0.argmin(axis=2) == 2)
def test_argmax(self, masked):
assert self.t0.argmax() == self.t0.size - 2
if masked:
# The 0 is where all entries are masked in that axis
assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
else:
assert np.all(self.t0.argmax(axis=0) == 1)
assert np.all(self.t0.argmax(axis=1) == 4)
assert np.all(self.t0.argmax(axis=2) == 3)
def test_argsort(self, masked):
order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
assert np.all(self.t0.argsort() == np.array(order))
assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
assert np.all(self.t0.argsort(axis=2) == np.array(order))
ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
if masked:
t0v = self.t0.argsort(axis=None)
# Manually remove elements in ravel that correspond to masked
# entries in self.t0. This removes the 10 entries that are masked
# which show up at the end of the list.
mask = self.t0.mask.ravel()[ravel]
ravel = ravel[~mask]
assert np.all(t0v[:-10] == ravel)
else:
assert np.all(self.t0.argsort(axis=None) == ravel)
def test_min(self, masked):
assert self.t0.min() == self.t0[0, 0, 2]
assert np.all(self.t0.min(0) == self.t0[0])
assert np.all(self.t0.min(1) == self.t0[:, 0])
assert np.all(self.t0.min(2) == self.t0[:, :, 2])
assert self.t0.min(0).shape == (5, 5)
assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
assert self.t0.min(1).shape == (2, 5)
assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
assert self.t0.min(2).shape == (2, 5)
assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)
def test_max(self, masked):
assert self.t0.max() == self.t0[-1, -1, -2]
assert np.all(self.t0.max(0) == self.t0[1])
assert np.all(self.t0.max(1) == self.t0[:, 4])
assert np.all(self.t0.max(2) == self.t0[:, :, 3])
assert self.t0.max(0).shape == (5, 5)
assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)
def test_ptp(self, masked):
assert self.t0.ptp() == self.t0.max() - self.t0.min()
assert np.all(self.t0.ptp(0) == self.t0.max(0) - self.t0.min(0))
assert self.t0.ptp(0).shape == (5, 5)
assert self.t0.ptp(0, keepdims=True).shape == (1, 5, 5)
def test_sort(self, masked):
order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
assert np.all(self.t0.sort() == self.t0[:, :, order])
assert np.all(self.t0.sort(0) == self.t0)
assert np.all(self.t0.sort(1) == self.t0)
assert np.all(self.t0.sort(2) == self.t0[:, :, order])
if not masked:
assert np.all(self.t0.sort(None) ==
self.t0[:, :, order].ravel())
# Bit superfluous, but good to check.
assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))
def test_regression():
# For #5225, where a time with a single-element delta_ut1_utc could not
# be copied, flattened, or ravelled. (For copy, it is in test_basic.)
t = Time(49580.0, scale='tai', format='mjd')
t_ut1 = t.ut1
t_ut1_copy = copy.deepcopy(t_ut1)
assert type(t_ut1_copy.delta_ut1_utc) is np.ndarray
t_ut1_flatten = t_ut1.flatten()
assert type(t_ut1_flatten.delta_ut1_utc) is np.ndarray
t_ut1_ravel = t_ut1.ravel()
assert type(t_ut1_ravel.delta_ut1_utc) is np.ndarray
assert t_ut1_copy.delta_ut1_utc == t_ut1.delta_ut1_utc
|
babc59a038f06de2378c903fe9aa59d95f71e00bb3191986065d185f36a59b6a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
from astropy.utils.compat import NUMPY_LT_1_14
from astropy.tests.helper import pytest
from astropy.time import Time
from astropy.table import Table
try:
import h5py # pylint: disable=W0611
except ImportError:
HAS_H5PY = False
else:
HAS_H5PY = True
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
is_masked = np.ma.is_masked
def test_simple():
t = Time([1, 2, 3], format='cxcsec')
assert t.masked is False
assert np.all(t.mask == [False, False, False])
# Before masking, format output is not a masked array (it is an ndarray
# like always)
assert not isinstance(t.value, np.ma.MaskedArray)
assert not isinstance(t.unix, np.ma.MaskedArray)
t[2] = np.ma.masked
assert t.masked is True
assert np.all(t.mask == [False, False, True])
assert allclose_sec(t.value[:2], [1, 2])
assert is_masked(t.value[2])
assert is_masked(t[2].value)
# After masking format output is a masked array
assert isinstance(t.value, np.ma.MaskedArray)
assert isinstance(t.unix, np.ma.MaskedArray)
    # TODO: test all formats
def test_scalar_init():
t = Time('2000:001')
assert t.masked is False
assert t.mask == np.array(False)
def test_mask_not_writeable():
t = Time('2000:001')
with pytest.raises(AttributeError) as err:
t.mask = True
assert "can't set attribute" in str(err)
t = Time(['2000:001'])
with pytest.raises(ValueError) as err:
t.mask[0] = True
assert "assignment destination is read-only" in str(err)
def test_str():
t = Time(['2000:001', '2000:002'])
t[1] = np.ma.masked
assert str(t) == "['2000:001:00:00:00.000' --]"
assert repr(t) == "<Time object: scale='utc' format='yday' value=['2000:001:00:00:00.000' --]>"
if NUMPY_LT_1_14:
expected = ["masked_array(data = ['2000-01-01 00:00:00.000' --],",
" mask = [False True],",
" fill_value = N/A)"]
else:
expected = ["masked_array(data=['2000-01-01 00:00:00.000', --],",
' mask=[False, True],',
" fill_value='N/A',",
" dtype='<U23')"]
# Note that we need to take care to allow for big-endian platforms,
# for which the dtype will be >U23 instead of <U23, which we do with
# the call to replace().
assert repr(t.iso).replace('>U23', '<U23').splitlines() == expected
# Assign value to unmask
t[1] = '2000:111'
assert str(t) == "['2000:001:00:00:00.000' '2000:111:00:00:00.000']"
assert t.masked is False
def test_transform():
t = Time(['2000:001', '2000:002'])
t[1] = np.ma.masked
# Change scale (this tests the ERFA machinery with masking as well)
t_ut1 = t.ut1
assert is_masked(t_ut1.value[1])
assert not is_masked(t_ut1.value[0])
assert np.all(t_ut1.mask == [False, True])
# Change format
t_unix = t.unix
assert is_masked(t_unix[1])
assert not is_masked(t_unix[0])
assert np.all(t_unix.mask == [False, True])
def test_masked_input():
v0 = np.ma.MaskedArray([[1, 2], [3, 4]]) # No masked elements
v1 = np.ma.MaskedArray([[1, 2], [3, 4]], mask=[[True, False], [False, False]])
v2 = np.ma.MaskedArray([[10, 20], [30, 40]], mask=[[False, False], [False, True]])
# Init from various combinations of masked arrays
t = Time(v0, format='cxcsec')
assert np.ma.allclose(t.value, v0)
assert np.all(t.mask == [[False, False], [False, False]])
assert t.masked is False
t = Time(v1, format='cxcsec')
assert np.ma.allclose(t.value, v1)
assert np.all(t.mask == v1.mask)
assert np.all(t.value.mask == v1.mask)
assert t.masked is True
t = Time(v1, v2, format='cxcsec')
assert np.ma.allclose(t.value, v1 + v2)
assert np.all(t.mask == (v1 + v2).mask)
assert t.masked is True
t = Time(v0, v1, format='cxcsec')
assert np.ma.allclose(t.value, v0 + v1)
assert np.all(t.mask == (v0 + v1).mask)
assert t.masked is True
t = Time(0, v2, format='cxcsec')
assert np.ma.allclose(t.value, v2)
assert np.all(t.mask == v2.mask)
assert t.masked is True
# Init from a string masked array
t_iso = t.iso
t2 = Time(t_iso)
assert np.all(t2.value == t_iso)
assert np.all(t2.mask == v2.mask)
assert t2.masked is True
def test_serialize_fits_masked(tmpdir):
tm = Time([1, 2, 3], format='cxcsec')
tm[1] = np.ma.masked
fn = str(tmpdir.join('tempfile.fits'))
t = Table([tm])
t.write(fn)
t2 = Table.read(fn, astropy_native=True)
    # Time FITS handling does not currently round-trip the format in FITS
t2['col0'].format = tm.format
assert t2['col0'].masked
assert np.all(t2['col0'].mask == [False, True, False])
assert np.all(t2['col0'].value == t['col0'].value)
@pytest.mark.skipif(not HAS_YAML or not HAS_H5PY,
reason='Need both h5py and yaml')
def test_serialize_hdf5_masked(tmpdir):
tm = Time([1, 2, 3], format='cxcsec')
tm[1] = np.ma.masked
fn = str(tmpdir.join('tempfile.hdf5'))
t = Table([tm])
t.write(fn, path='root', serialize_meta=True)
t2 = Table.read(fn)
assert t2['col0'].masked
assert np.all(t2['col0'].mask == [False, True, False])
assert np.all(t2['col0'].value == t['col0'].value)
@pytest.mark.skipif('not HAS_YAML')
def test_serialize_ecsv_masked(tmpdir):
tm = Time([1, 2, 3], format='cxcsec')
tm[1] = np.ma.masked
# Serializing in the default way for ECSV fails to round-trip
# because it writes out a "nan" instead of "". But for jd1/jd2
# this works OK.
tm.info.serialize_method['ecsv'] = 'jd1_jd2'
fn = str(tmpdir.join('tempfile.ecsv'))
t = Table([tm])
t.write(fn)
t2 = Table.read(fn)
assert t2['col0'].masked
assert np.all(t2['col0'].mask == [False, True, False])
# Serializing floats to ASCII loses some precision so use allclose
# and 1e-7 seconds tolerance.
assert np.allclose(t2['col0'].value, t['col0'].value, rtol=0, atol=1e-7)
|
372b714dfc0f6ee0c72562d588b655770ee007ce6da04b5f5640604cf2077aa8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import pytest
import numpy as np
from astropy.time import Time
from astropy.time.core import SIDEREAL_TIME_MODELS
allclose_hours = functools.partial(np.allclose, rtol=1e-15, atol=3e-8)
# 0.1 ms atol; IERS-B files change at that level.
within_1_second = functools.partial(np.allclose, rtol=1., atol=1./3600.)
within_2_seconds = functools.partial(np.allclose, rtol=1., atol=2./3600.)
def test_doc_string_contains_models():
"""The doc string is formatted; this ensures this remains working."""
for kind in ('mean', 'apparent'):
for model in SIDEREAL_TIME_MODELS[kind]:
assert model in Time.sidereal_time.__doc__
class TestERFATestCases():
"""Test that we reproduce the test cases given in erfa/src/t_erfa_c.c"""
# all tests use the following JD inputs
time_ut1 = Time(2400000.5, 53736.0, scale='ut1', format='jd')
time_tt = Time(2400000.5, 53736.0, scale='tt', format='jd')
# but tt != ut1 at these dates, unlike what the test cases assume, so we
# cannot reproduce them exactly. In practice this hardly matters, but we
# may as well fake it (and avoid an IERS table lookup here)
time_ut1.delta_ut1_utc = 0.
time_ut1.delta_ut1_utc = 24*3600*(time_ut1.tt.jd2-time_tt.jd2)
assert np.allclose(time_ut1.tt.jd2 - time_tt.jd2, 0., atol=1.e-14)
@pytest.mark.parametrize('erfa_test_input',
((1.754174972210740592, 1e-12, "eraGmst00"),
(1.754174971870091203, 1e-12, "eraGmst06"),
(1.754174981860675096, 1e-12, "eraGmst82"),
(1.754166138018281369, 1e-12, "eraGst00a"),
(1.754166136510680589, 1e-12, "eraGst00b"),
(1.754166137675019159, 1e-12, "eraGst06a"),
(1.754166136020645203, 1e-12, "eraGst94")))
def test_iau_models(self, erfa_test_input):
result, precision, name = erfa_test_input
if name[4] == 'm':
kind = 'mean'
model_name = 'IAU{0:2d}{1:s}'.format(20 if name[7] == '0' else 19,
name[7:])
else:
kind = 'apparent'
model_name = 'IAU{0:2d}{1:s}'.format(20 if name[6] == '0' else 19,
name[6:].upper())
assert kind in SIDEREAL_TIME_MODELS.keys()
assert model_name in SIDEREAL_TIME_MODELS[kind]
model = SIDEREAL_TIME_MODELS[kind][model_name]
gst_erfa = self.time_ut1._erfa_sidereal_time(model)
assert np.allclose(gst_erfa.to_value('radian'), result,
rtol=1., atol=precision)
gst = self.time_ut1.sidereal_time(kind, 'greenwich', model_name)
assert np.allclose(gst.to_value('radian'), result,
rtol=1., atol=precision)
class TestST():
"""Test Greenwich Sidereal Time. Unlike above, this is relative to
what was found earlier, so checks changes in implementation, including
leap seconds, rather than correctness"""
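# Illustrative call pattern (a sketch added for orientation, not part
# of the original test module): the reference values below come from
# calls like
#
#     Time('2012-06-30 12:00:00', scale='utc').sidereal_time('mean', 'greenwich')
#
# which returns a Longitude in hourangle units.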
t1 = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-06-30 23:59:60', '2012-07-01 00:00:00',
'2012-07-01 12:00:00'], scale='utc')
t2 = Time(t1, location=('120d', '10d'))
def test_gmst(self):
"""Compare Greenwich Mean Sidereal Time with what was found earlier
"""
gmst_compare = np.array([6.5968497894730564, 18.629426164144697,
18.629704702452862, 18.629983240761003,
6.6628381828899643])
gmst = self.t1.sidereal_time('mean', 'greenwich')
assert allclose_hours(gmst.value, gmst_compare)
def test_gst(self):
"""Compare Greenwich Apparent Sidereal Time with what was found earlier
"""
gst_compare = np.array([6.5971168570494854, 18.629694220878296,
18.62997275921186, 18.630251297545389,
6.6631074284018244])
gst = self.t1.sidereal_time('apparent', 'greenwich')
assert allclose_hours(gst.value, gst_compare)
def test_gmst_gst_close(self):
"""Check that Mean and Apparent are within a few seconds."""
gmst = self.t1.sidereal_time('mean', 'greenwich')
gst = self.t1.sidereal_time('apparent', 'greenwich')
assert within_2_seconds(gst.value, gmst.value)
def test_gmst_independent_of_self_location(self):
"""Check that Greenwich time does not depend on self.location"""
gmst1 = self.t1.sidereal_time('mean', 'greenwich')
gmst2 = self.t2.sidereal_time('mean', 'greenwich')
assert allclose_hours(gmst1.value, gmst2.value)
@pytest.mark.parametrize('kind', ('mean', 'apparent'))
def test_lst(self, kind):
"""Compare Local Sidereal Time with what was found earlier,
as well as with what is expected from GMST
"""
lst_compare = {
'mean': np.array([14.596849789473058, 2.629426164144693,
2.6297047024528588, 2.6299832407610033,
14.662838182889967]),
'apparent': np.array([14.597116857049487, 2.6296942208782959,
2.6299727592118565, 2.6302512975453887,
14.663107428401826])}
gmst2 = self.t2.sidereal_time(kind, 'greenwich')
lmst2 = self.t2.sidereal_time(kind)
assert allclose_hours(lmst2.value, lst_compare[kind])
assert allclose_hours((lmst2-gmst2).wrap_at('12h').value,
self.t2.location.lon.to_value('hourangle'))
# check it also works when one gives longitude explicitly
lmst1 = self.t1.sidereal_time(kind, self.t2.location.lon)
assert allclose_hours(lmst1.value, lst_compare[kind])
def test_lst_needs_location(self):
with pytest.raises(ValueError):
self.t1.sidereal_time('mean')
with pytest.raises(ValueError):
self.t1.sidereal_time('mean', None)
class TestModelInterpretation():
"""Check that models are different, and that wrong models are recognized"""
t = Time(['2012-06-30 12:00:00'], scale='utc', location=('120d', '10d'))
@pytest.mark.parametrize('kind', ('mean', 'apparent'))
def test_model_uniqueness(self, kind):
"""Check models give different answers, yet are close."""
for model1, model2 in itertools.combinations(
SIDEREAL_TIME_MODELS[kind].keys(), 2):
gst1 = self.t.sidereal_time(kind, 'greenwich', model1)
gst2 = self.t.sidereal_time(kind, 'greenwich', model2)
assert np.all(gst1.value != gst2.value)
assert within_1_second(gst1.value, gst2.value)
lst1 = self.t.sidereal_time(kind, None, model1)
lst2 = self.t.sidereal_time(kind, None, model2)
assert np.all(lst1.value != lst2.value)
assert within_1_second(lst1.value, lst2.value)
@pytest.mark.parametrize(('kind', 'other'), (('mean', 'apparent'),
('apparent', 'mean')))
def test_wrong_models_raise_exceptions(self, kind, other):
with pytest.raises(ValueError):
self.t.sidereal_time(kind, 'greenwich', 'nonsense')
for model in (set(SIDEREAL_TIME_MODELS[other].keys()) -
set(SIDEREAL_TIME_MODELS[kind].keys())):
with pytest.raises(ValueError):
self.t.sidereal_time(kind, 'greenwich', model)
with pytest.raises(ValueError):
self.t.sidereal_time(kind, None, model)
|
ed0c2bde88a0454c3af99d3c6a2543c8736faf8249888f15114c8dbad183a067 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles the CDS string format for units
"""
import operator
import os
import re
from .base import Base
from . import core, utils
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty
from astropy.utils.misc import did_you_mean
# TODO: Support logarithmic units using bracketed syntax
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<http://vizier.u-strasbg.fr/cgi-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_tokens = (
'PRODUCT',
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'X',
'SIGN',
'UINT',
'UFLOAT',
'UNIT'
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@staticmethod
def _generate_unit_names():
from astropy.units import cds
from astropy import units as u
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
from astropy.extern.ply import lex
tokens = cls._tokens
t_PRODUCT = r'\.'
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
def t_UNIT(t):
r'\%|°|\\h|((?!\d)\w)+'
t.value = cls._get_unit(t)
return t
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'cds_lextab.py'))
lexer = lex.lex(optimize=True, lextab='cds_lextab',
outputdir=os.path.dirname(__file__),
reflags=int(re.UNICODE))
if not lexer_exists:
cls._add_tab_header('cds_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_, which is not
terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
from astropy.extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : factor combined_units
| combined_units
| factor
'''
from astropy.units.core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
'''
combined_units : product_of_units
| division_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
'''
division_of_units : DIVISION unit_expression
| unit_expression DIVISION combined_units
'''
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
'''
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
'''
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
'''
if len(p) == 5:
if p[3] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
'''
unit_with_power : UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
'''
numeric_power : sign UINT
'''
p[0] = p[1] * p[2]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'cds_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='cds_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('cds_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, {1}".format(
t.lexpos, str(e)))
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{0}' not supported by the CDS SAC "
"standard. {1}".format(
unit, did_you_mean(
unit, cls._units)))
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
# Decode bytes first so the whitespace check below cannot fail
# with a TypeError on bytes input.
if not isinstance(s, str):
s = s.decode('ascii')
if ' ' in s:
raise ValueError('CDS unit must not contain whitespace')
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@staticmethod
def _get_unit_name(unit):
return unit.get_format_name('cds')
@classmethod
def _format_unit_list(cls, units):
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append('{0}{1}'.format(
cls._get_unit_name(base), int(power)))
return '.'.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if (unit.physical_type == 'dimensionless' and
is_effectively_unity(unit.scale*100.)):
return '%'
if unit.scale == 1:
s = ''
else:
m, e = utils.split_mantissa_exponent(unit.scale)
parts = []
if m not in ('', '1'):
parts.append(m)
if e:
if not e.startswith('-'):
e = "+" + e
parts.append('10{0}'.format(e))
s = 'x'.join(parts)
pairs = list(zip(unit.bases, unit.powers))
if len(pairs) > 0:
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
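# Round-trip sketch (assumed behaviour, following the sorting and
# joining above): a composite such as km / s is serialized with powers
# in descending order and '.' as the product separator, e.g.
#
#     >>> import astropy.units as u
#     >>> (u.km / u.s).to_string('cds')  # doctest: +SKIP
#     'km.s-1'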
|
0982a78b18e291de316e50ddea2cb34370b2bda915df0e4f7285724a784a3721 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# generic_parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'DOUBLE_STAR STAR PERIOD SOLIDUS CARET OPEN_PAREN CLOSE_PAREN FUNCNAME UNIT SIGN UINT UFLOAT\n main : product_of_units\n | factor product_of_units\n | factor product product_of_units\n | division_product_of_units\n | factor division_product_of_units\n | factor product division_product_of_units\n | inverse_unit\n | factor inverse_unit\n | factor product inverse_unit\n | factor\n \n division_product_of_units : division_product_of_units division product_of_units\n | product_of_units\n \n inverse_unit : division unit_expression\n \n factor : factor_fits\n | factor_float\n | factor_int\n \n factor_float : signed_float\n | signed_float UINT signed_int\n | signed_float UINT power numeric_power\n \n factor_int : UINT\n | UINT signed_int\n | UINT power numeric_power\n | UINT UINT signed_int\n | UINT UINT power numeric_power\n \n factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN\n | UINT power OPEN_PAREN UINT CLOSE_PAREN\n | UINT power signed_int\n | UINT power UINT\n | UINT SIGN UINT\n | UINT OPEN_PAREN signed_int CLOSE_PAREN\n \n product_of_units : unit_expression product product_of_units\n | unit_expression product_of_units\n | unit_expression\n \n unit_expression : function\n | unit_with_power\n | OPEN_PAREN product_of_units CLOSE_PAREN\n \n unit_with_power : UNIT power numeric_power\n | UNIT numeric_power\n | UNIT\n \n numeric_power : sign UINT\n | OPEN_PAREN paren_expr CLOSE_PAREN\n \n paren_expr : sign UINT\n | signed_float\n | frac\n \n frac : sign UINT division sign UINT\n \n sign : SIGN\n |\n \n product : STAR\n | PERIOD\n \n division : SOLIDUS\n \n power : DOUBLE_STAR\n | CARET\n \n signed_int : SIGN UINT\n \n signed_float : sign UINT\n | sign UFLOAT\n \n function_name : FUNCNAME\n \n function : function_name OPEN_PAREN main CLOSE_PAREN\n '
_lr_action_items = {'OPEN_PAREN':([0,3,6,7,8,9,10,11,12,13,14,16,17,18,19,21,23,26,27,28,29,34,36,38,39,41,42,43,46,47,53,54,55,57,59,60,63,64,65,67,68,73,74,77,78,79,80,82,83,],[13,13,13,-14,-15,-16,13,-34,-35,13,35,-17,-50,41,45,-56,13,-48,-49,13,13,58,-21,-51,-52,13,45,-38,-54,-55,-36,-23,45,-28,-27,-22,-29,-18,45,-37,-40,-24,-53,-30,-19,-57,-41,-26,-25,]),'UINT':([0,14,15,16,17,19,20,34,37,38,39,41,42,44,45,46,47,55,56,58,61,65,70,84,85,],[14,33,-46,40,-50,-47,46,57,63,-51,-52,14,-47,68,-47,-54,-55,-47,74,75,74,-47,81,-47,86,]),'SOLIDUS':([0,2,3,4,6,7,8,9,11,12,14,16,19,22,23,24,26,27,30,36,41,43,46,47,48,49,51,52,53,54,57,59,60,63,64,67,68,73,74,77,78,79,80,81,82,83,],[17,-12,17,17,-33,-14,-15,-16,-34,-35,-20,-17,-39,-12,17,17,-48,-49,-32,-21,17,-38,-54,-55,-12,17,-11,-31,-36,-23,-28,-27,-22,-29,-18,-37,-40,-24,-53,-30,-19,-57,-41,17,-26,-25,]),'UNIT':([0,3,6,7,8,9,10,11,12,13,14,16,17,19,23,26,27,28,29,36,41,43,46,47,53,54,57,59,60,63,64,67,68,73,74,77,78,79,80,82,83,],[19,19,19,-14,-15,-16,19,-34,-35,19,-20,-17,-50,-39,19,-48,-49,19,19,-21,19,-38,-54,-55,-36,-23,-28,-27,-22,-29,-18,-37,-40,-24,-53,-30,-19,-57,-41,-26,-25,]),'FUNCNAME':([0,3,6,7,8,9,10,11,12,13,14,16,17,19,23,26,27,28,29,36,41,43,46,47,53,54,57,59,60,63,64,67,68,73,74,77,78,79,80,82,83,],[21,21,21,-14,-15,-16,21,-34,-35,21,-20,-17,-50,-39,21,-48,-49,21,21,-21,21,-38,-54,-55,-36,-23,-28,-27,-22,-29,-18,-37,-40,-24,-53,-30,-19,-57,-41,-26,-25,]),'SIGN':([0,14,17,19,33,34,35,38,39,40,41,42,45,55,58,65,84,],[15,37,-50,15,56,61,56,-51,-52,56,15,15,15,15,61,15,15,]),'UFLOAT':([0,15,20,41,45,58,61,70,],[-47,-46,47,-47,-47,-47,-46,47,]),'$end':([1,2,3,4,5,6,7,8,9,11,12,14,16,19,22,24,25,30,31,36,43,46,47,48,49,50,51,52,53,54,57,59,60,63,64,67,68,73,74,77,78,79,80,82,83,],[0,-1,-10,-4,-7,-33,-14,-15,-16,-34,-35,-20,-17,-39,-2,-5,-8,-32,-13,-21,-38,-54,-55,-3,-6,-9,-11,-31,-36,-23,-28,-27,-22,-29,-18,-37,-40,-24,-53,-30,-19,-57,-41,-26,-25,]),'CLOSE_PAREN':([2,3,4,5,6,7,8,9,11,12,14,16,19,22,24,25,30,31,32,36,43,46,47,48,49,50,51,52,53,54,57,59,60,62,63,64,66,67,68,69,71,72,73,74,75,76,77,78,79,80,81,82,83,86,],[-1,-10,-4,-7,-33,-14,-15,-16,-34,-35,-20,-17,-39,-2,-5,-8,-32,-13,53,-21,-38,-54,-55,-3,-6,-9,-11,-31,-36,-23,-28,-27,-22,77,-29,-18,79,-37,-40,80,-43,-44,-24,-53,82,83,-30,-19,-57,-41,-42,-26,-25,-45,]),'STAR':([3,6,7,8,9,11,12,14,16,19,36,43,46,47,53,54,57,59,60,63,64,67,68,73,74,77,78,79,80,82,83,],[26,26,-14,-15,-16,-34,-35,-20,-17,-39,-21,-38,-54,-55,-36,-23,-28,-27,-22,-29,-18,-37,-40,-24,-53,-30,-19,-57,-41,-26,-25,]),'PERIOD':([3,6,7,8,9,11,12,14,16,19,36,43,46,47,53,54,57,59,60,63,64,67,68,73,74,77,78,79,80,82,83,],[27,27,-14,-15,-16,-34,-35,-20,-17,-39,-21,-38,-54,-55,-36,-23,-28,-27,-22,-29,-18,-37,-40,-24,-53,-30,-19,-57,-41,-26,-25,]),'DOUBLE_STAR':([14,19,33,40,],[38,38,38,38,]),'CARET':([14,19,33,40,],[39,39,39,39,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'main':([0,41,],[1,66,]),'product_of_units':([0,3,6,13,23,28,29,41,],[2,22,30,32,48,51,52,2,]),'factor':([0,41,],[3,3,]),'division_product_of_units':([0,3,23,41,],[4,24,49,4,]),'inverse_unit':([0,3,23,41,],[5,25,50,5,]),'unit_expression':([0,3,6,10,13,23,28,29,41,],[6,6,6,31,6,6,6,6,6,]),'factor_fits':([0,41,],[7,7,]),'factor_float':([0,41,],[8,8,]),'factor_int':([0,41,],[9,9,]),'division':([0,3,4,23,24,41,49,81,],[10,10,28,10,28,10,28,84,]),'function':([0,3,6,10,13,23,28,29,41,],[11,11,11,11,11,11,11,11,11,]),'unit_with_power':([0,3,6,10,13,23,28,29,41,],[12,12,12,12,12,12,12,12,12,]),'signed_float':([0,41,45,58,],[16,16,71,71,]),'function_name':([0,3,6,10,13,23,28,29,41,],[18,18,18,18,18,18,18,18,18,]),'sign':([0,19,34,41,42,45,55,58,65,84,],[20,44,44,20,44,70,44,70,44,85,]),'product':([3,6,],[23,29,]),'power':([14,19,33,40,],[34,42,55,65,]),'signed_int':([14,33,34,35,40,58,],[36,54,59,62,64,76,]),'numeric_power':([19,34,42,55,65,],[43,60,67,73,78,]),'paren_expr':([45,58,],[69,69,]),'frac':([45,58,],[72,72,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> main","S'",1,None,None,None),
('main -> product_of_units','main',1,'p_main','generic.py',193),
('main -> factor product_of_units','main',2,'p_main','generic.py',194),
('main -> factor product product_of_units','main',3,'p_main','generic.py',195),
('main -> division_product_of_units','main',1,'p_main','generic.py',196),
('main -> factor division_product_of_units','main',2,'p_main','generic.py',197),
('main -> factor product division_product_of_units','main',3,'p_main','generic.py',198),
('main -> inverse_unit','main',1,'p_main','generic.py',199),
('main -> factor inverse_unit','main',2,'p_main','generic.py',200),
('main -> factor product inverse_unit','main',3,'p_main','generic.py',201),
('main -> factor','main',1,'p_main','generic.py',202),
('division_product_of_units -> division_product_of_units division product_of_units','division_product_of_units',3,'p_division_product_of_units','generic.py',214),
('division_product_of_units -> product_of_units','division_product_of_units',1,'p_division_product_of_units','generic.py',215),
('inverse_unit -> division unit_expression','inverse_unit',2,'p_inverse_unit','generic.py',225),
('factor -> factor_fits','factor',1,'p_factor','generic.py',231),
('factor -> factor_float','factor',1,'p_factor','generic.py',232),
('factor -> factor_int','factor',1,'p_factor','generic.py',233),
('factor_float -> signed_float','factor_float',1,'p_factor_float','generic.py',239),
('factor_float -> signed_float UINT signed_int','factor_float',3,'p_factor_float','generic.py',240),
('factor_float -> signed_float UINT power numeric_power','factor_float',4,'p_factor_float','generic.py',241),
('factor_int -> UINT','factor_int',1,'p_factor_int','generic.py',254),
('factor_int -> UINT signed_int','factor_int',2,'p_factor_int','generic.py',255),
('factor_int -> UINT power numeric_power','factor_int',3,'p_factor_int','generic.py',256),
('factor_int -> UINT UINT signed_int','factor_int',3,'p_factor_int','generic.py',257),
('factor_int -> UINT UINT power numeric_power','factor_int',4,'p_factor_int','generic.py',258),
('factor_fits -> UINT power OPEN_PAREN signed_int CLOSE_PAREN','factor_fits',5,'p_factor_fits','generic.py',276),
('factor_fits -> UINT power OPEN_PAREN UINT CLOSE_PAREN','factor_fits',5,'p_factor_fits','generic.py',277),
('factor_fits -> UINT power signed_int','factor_fits',3,'p_factor_fits','generic.py',278),
('factor_fits -> UINT power UINT','factor_fits',3,'p_factor_fits','generic.py',279),
('factor_fits -> UINT SIGN UINT','factor_fits',3,'p_factor_fits','generic.py',280),
('factor_fits -> UINT OPEN_PAREN signed_int CLOSE_PAREN','factor_fits',4,'p_factor_fits','generic.py',281),
('product_of_units -> unit_expression product product_of_units','product_of_units',3,'p_product_of_units','generic.py',300),
('product_of_units -> unit_expression product_of_units','product_of_units',2,'p_product_of_units','generic.py',301),
('product_of_units -> unit_expression','product_of_units',1,'p_product_of_units','generic.py',302),
('unit_expression -> function','unit_expression',1,'p_unit_expression','generic.py',313),
('unit_expression -> unit_with_power','unit_expression',1,'p_unit_expression','generic.py',314),
('unit_expression -> OPEN_PAREN product_of_units CLOSE_PAREN','unit_expression',3,'p_unit_expression','generic.py',315),
('unit_with_power -> UNIT power numeric_power','unit_with_power',3,'p_unit_with_power','generic.py',324),
('unit_with_power -> UNIT numeric_power','unit_with_power',2,'p_unit_with_power','generic.py',325),
('unit_with_power -> UNIT','unit_with_power',1,'p_unit_with_power','generic.py',326),
('numeric_power -> sign UINT','numeric_power',2,'p_numeric_power','generic.py',337),
('numeric_power -> OPEN_PAREN paren_expr CLOSE_PAREN','numeric_power',3,'p_numeric_power','generic.py',338),
('paren_expr -> sign UINT','paren_expr',2,'p_paren_expr','generic.py',347),
('paren_expr -> signed_float','paren_expr',1,'p_paren_expr','generic.py',348),
('paren_expr -> frac','paren_expr',1,'p_paren_expr','generic.py',349),
('frac -> sign UINT division sign UINT','frac',5,'p_frac','generic.py',358),
('sign -> SIGN','sign',1,'p_sign','generic.py',364),
('sign -> <empty>','sign',0,'p_sign','generic.py',365),
('product -> STAR','product',1,'p_product','generic.py',374),
('product -> PERIOD','product',1,'p_product','generic.py',375),
('division -> SOLIDUS','division',1,'p_division','generic.py',381),
('power -> DOUBLE_STAR','power',1,'p_power','generic.py',387),
('power -> CARET','power',1,'p_power','generic.py',388),
('signed_int -> SIGN UINT','signed_int',2,'p_signed_int','generic.py',394),
('signed_float -> sign UINT','signed_float',2,'p_signed_float','generic.py',400),
('signed_float -> sign UFLOAT','signed_float',2,'p_signed_float','generic.py',401),
('function_name -> FUNCNAME','function_name',1,'p_function_name','generic.py',407),
('function -> function_name OPEN_PAREN main CLOSE_PAREN','function',4,'p_function','generic.py',413),
]
|
7dd1afdce1a622e7ab096c2c443b4fbf4dda7face33581061e3023e97e050c94 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
import copy
import keyword
import operator
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/Documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(
r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import required_by_vounit as uvo
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'D', 'F', 'G', 'H', 'Hz', 'J', 'Jy', 'K', 'N',
'Ohm', 'Pa', 'R', 'Ry', 'S', 'T', 'V', 'W', 'Wb', 'a',
'adu', 'arcmin', 'arcsec', 'barn', 'beam', 'bin', 'cd',
'chan', 'count', 'ct', 'd', 'deg', 'eV', 'erg', 'g', 'h',
'lm', 'lx', 'lyr', 'm', 'mag', 'min', 'mol', 'pc', 'ph',
'photon', 'pix', 'pixel', 'rad', 'rad', 's', 'solLum',
'solMass', 'solRad', 'sr', 'u', 'voxel', 'yr'
]
binary_bases = [
'bit', 'byte', 'B'
]
simple_units = [
'Angstrom', 'angstrom', 'AU', 'au', 'Ba', 'dB', 'mas'
]
si_prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
binary_prefixes = [
'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei'
]
deprecated_units = set([
'a', 'angstrom', 'Angstrom', 'au', 'Ba', 'barn', 'ct',
'erg', 'G', 'ph', 'pix'
])
def do_defines(bases, prefixes, skips=[]):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
do_defines(bases, si_prefixes, ['pct', 'pcount', 'yd'])
do_defines(binary_bases, si_prefixes + binary_prefixes, ['dB', 'dbyte'])
do_defines(simple_units, [''])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ('unknown', 'UNKNOWN'):
return None
if s == '':
return core.dimensionless_unscaled
if s.count('/') > 1:
raise core.UnitsError(
"'{0}' contains multiple slashes, which is "
"disallowed by the VOUnit standard".format(s))
result = cls._do_parse(s, debug=debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported in "
"VOUnit.")
return result
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if cls._explicit_custom_unit_regex.match(unit):
return cls._def_custom_unit(unit)
if not cls._custom_unit_regex.match(unit):
raise ValueError()
warnings.warn(
"Unit {0!r} not supported by the VOUnit "
"standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)),
core.UnitsWarning)
return cls._def_custom_unit(unit)
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'VOUnit',
cls._to_decomposed_alternative)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
# The da- and d- prefixes are discouraged. This has the
# effect of adding a scale to the value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
"In '{0}': VOUnit can not represent units with the 'da' "
"(deka) prefix".format(unit))
elif unit._represents.scale == 0.1:
raise ValueError(
"In '{0}': VOUnit can not represent units with the 'd' "
"(deci) prefix".format(unit))
name = unit.get_format_name('vounit')
if unit in cls._custom_units.values():
return name
if name not in cls._units:
raise ValueError(
"Unit {0!r} is not part of the VOUnit standard".format(name))
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, 'VOUnit',
cls._to_decomposed_alternative)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={'vounit': name},
namespace=cls._custom_units)
else:
return core.def_unit(
name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix):]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(factor, [base_unit], [1],
_error_check=False),
format={'vounit': prefix + base_unit.names[-1]},
namespace=cls._custom_units)
return def_base(unit)
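# Sketch of the custom-unit path above (the unit name is hypothetical):
# parsing 'kfurlong' warns, recognizes the SI prefix 'k', defines a new
# base unit 'furlong' in ``cls._custom_units``, and returns a
# PrefixUnit scaled by 1000 relative to that base.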
@classmethod
def to_string(cls, unit):
from astropy.units import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit.physical_type == 'dimensionless' and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
"Multiply your data by {0:e}."
.format(unit.scale))
s = ''
if unit.scale != 1:
m, ex = utils.split_mantissa_exponent(unit.scale)
parts = []
if m:
parts.append(m)
if ex:
fex = '10'
if not ex.startswith('-'):
fex += '+'
fex += ex
parts.append(fex)
s += ' '.join(parts)
pairs = list(zip(unit.bases, unit.powers))
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
from astropy.units import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
cls.to_string(unit), scale)
return s
|
906d8953f2de8f80c665e7acd63cb915170ba4b8ebc12a42c177c8c29c2fb7de | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "LaTeX" unit format.
"""
import numpy as np
from . import base, core, utils
class Latex(base.Base):
"""
Output LaTeX to display the unit based on IAU style guidelines.
Attempts to follow the `IAU Style Manual
<https://www.iau.org/static/publications/stylemanual1989.pdf>`_.
"""
@classmethod
def _latex_escape(cls, name):
# This doesn't escape arbitrary LaTeX strings, but it should
# be good enough for unit names which are required to be alpha
# + "_" anyway.
return name.replace('_', r'\_')
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('latex')
if name == unit.name:
return cls._latex_escape(name)
return name
@classmethod
def _format_unit_list(cls, units):
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append('{0}^{{{1}}}'.format(
cls._get_unit_name(base),
utils.format_power(power)))
return r'\,'.join(out)
@classmethod
def _format_bases(cls, unit):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(negatives):
if len(positives):
positives = cls._format_unit_list(positives)
else:
positives = '1'
negatives = cls._format_unit_list(negatives)
s = r'\frac{{{0}}}{{{1}}}'.format(positives, negatives)
else:
positives = cls._format_unit_list(positives)
s = positives
return s
@classmethod
def to_string(cls, unit):
latex_name = None
if hasattr(unit, '_format'):
latex_name = unit._format.get('latex')
if latex_name is not None:
s = latex_name
elif isinstance(unit, core.CompositeUnit):
if unit.scale == 1:
s = ''
else:
s = cls.format_exponential_notation(unit.scale) + r'\,'
if len(unit.bases):
s += cls._format_bases(unit)
elif isinstance(unit, core.NamedUnit):
s = cls._latex_escape(unit.name)
return r'$\mathrm{{{0}}}$'.format(s)
@classmethod
def format_exponential_notation(cls, val, format_spec=".8g"):
"""
Formats a value in exponential notation for LaTeX.
Parameters
----------
val : number
The value to be formatted
format_spec : str, optional
Format used to split up mantissa and exponent
Returns
-------
latex_string : str
The value in exponential notation in a format suitable for LaTeX.
"""
if np.isfinite(val):
m, ex = utils.split_mantissa_exponent(val, format_spec)
parts = []
if m:
parts.append(m)
if ex:
parts.append("10^{{{0}}}".format(ex))
return r" \times ".join(parts)
else:
if np.isnan(val):
return r'{\rm NaN}'
elif val > 0:
# positive infinity
return r'\infty'
else:
# negative infinity
return r'-\infty'
class LatexInline(Latex):
"""
Output LaTeX to display the unit based on IAU style guidelines with negative
powers.
Attempts to follow the `IAU Style Manual
<https://www.iau.org/static/publications/stylemanual1989.pdf>`_ and the
`ApJ and AJ style guide
<https://journals.aas.org/manuscript-preparation/>`_.
"""
name = 'latex_inline'
@classmethod
def _format_bases(cls, unit):
return cls._format_unit_list(zip(unit.bases, unit.powers))
|
3f97575439f4cbf7d6ea85d911bd3b1766fe83270cf0e78d84abca168a912faa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.utils.misc import InheritDocstrings
class _FormatterMeta(InheritDocstrings):
registry = {}
def __new__(mcls, name, bases, members):
if 'name' in members:
formatter_name = members['name'].lower()
else:
formatter_name = members['name'] = name.lower()
cls = super().__new__(mcls, name, bases, members)
mcls.registry[formatter_name] = cls
return cls
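# Note (a descriptive addition): each concrete formatter subclass is
# registered under its lower-cased name, so ``Base.registry['fits']``
# resolves to the Fits class; the format machinery is assumed to
# consult this registry when a format name such as ``format='fits'``
# is passed to ``Unit``.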
TAB_HEADER = """# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
"""
class Base(metaclass=_FormatterMeta):
"""
The abstract base class of all unit formats.
"""
def __new__(cls, *args, **kwargs):
# This __new__ is to make it clear that there is no reason to
# instantiate a Formatter--if you try to you'll just get back the
# class
return cls
@classmethod
def parse(cls, s):
"""
Convert a string to a unit object.
"""
raise NotImplementedError(
"Can not parse {0}".format(cls.__name__))
@classmethod
def to_string(cls, u):
"""
Convert a unit object to a string.
"""
raise NotImplementedError(
"Can not output in {0} format".format(cls.__name__))
@classmethod
def _add_tab_header(cls, name):
lextab_file = os.path.join(os.path.dirname(__file__), name + '.py')
with open(lextab_file, 'r') as f:
contents = f.read()
with open(lextab_file, 'w') as f:
f.write(TAB_HEADER)
f.write(contents)
|
3bc5ae6b911d3e458b8eadb1183bb4a4e3df7d4e708d73925e0cdc187cd0ad24 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import keyword
import math
import os
import copy
import warnings
from fractions import Fraction
from . import core, generic, utils
class OGIP(generic.Generic):
"""
Support the units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
_tokens = (
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'WHITESPACE',
'STARSTAR',
'STAR',
'SIGN',
'UFLOAT',
'LIT10',
'UINT',
'UNKNOWN',
'UNIT'
)
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'cd', 'eV', 'F', 'g', 'H', 'Hz', 'J',
'Jy', 'K', 'lm', 'lx', 'm', 'mol', 'N', 'ohm', 'Pa',
'pc', 'rad', 's', 'S', 'sr', 'T', 'V', 'W', 'Wb'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'angstrom', 'arcmin', 'arcsec', 'AU', 'barn', 'bin',
'byte', 'chan', 'count', 'day', 'deg', 'erg', 'G',
'h', 'lyr', 'mag', 'min', 'photon', 'pixel',
'voxel', 'yr'
]
for unit in simple_units:
names[unit] = getattr(u, unit)
# Create a separate, disconnected unit for the special case of
# Crab and mCrab, since OGIP doesn't define their quantities.
Crab = u.def_unit(['Crab'], prefixes=False, doc='Crab (X-ray flux)')
mCrab = u.Unit(10 ** -3 * Crab)
names['Crab'] = Crab
names['mCrab'] = mCrab
deprecated_units = ['Crab', 'mCrab']
for unit in deprecated_units:
deprecated_names.add(unit)
# Define the function names, so we can parse them, even though
# we can't use any of them (other than sqrt) meaningfully for
# now.
functions = [
'log', 'ln', 'exp', 'sqrt', 'sin', 'cos', 'tan', 'asin',
'acos', 'atan', 'sinh', 'cosh', 'tanh'
]
for name in functions:
names[name] = name
return names, deprecated_names, functions
@classmethod
def _make_lexer(cls):
from astropy.extern.ply import lex
tokens = cls._tokens
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_WHITESPACE = '[ \t]+'
t_STARSTAR = r'\*\*'
t_STAR = r'\*'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)'
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
def t_LIT10(t):
r'10'
return 10
def t_UNKNOWN(t):
r'[Uu][Nn][Kk][Nn][Oo][Ww][Nn]'
return None
def t_UNIT(t):
r'[a-zA-Z][a-zA-Z_]*'
t.value = cls._get_unit(t)
return t
# Don't ignore whitespace
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'ogip_lextab.py'))
lexer = lex.lex(optimize=True, lextab='ogip_lextab',
outputdir=os.path.dirname(__file__))
if not lexer_exists:
cls._add_tab_header('ogip_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the
`Specification of Physical Units within OGIP FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
which is not terribly precise. The exact grammar here is
based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
from astropy.extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : UNKNOWN
| complete_expression
| scale_factor complete_expression
| scale_factor WHITESPACE complete_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_complete_expression(p):
'''
complete_expression : product_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression
| division unit_expression
| product_of_units product unit_expression
| product_of_units division unit_expression
'''
if len(p) == 4:
if p[2] == 'DIVISION':
p[0] = p[1] / p[3]
else:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1]
def p_unit_expression(p):
'''
unit_expression : unit
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN
| OPEN_PAREN complete_expression CLOSE_PAREN
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
| OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
'''
# If we test ``p[1] in cls._functions`` directly, Python will try to
# compare the parsed unit against each item in the list, which is
# slow. Since we know that all the items in the list are strings, we
# can simply convert p[1] to a string instead.
p1_str = str(p[1])
if p1_str in cls._functions and p1_str != 'sqrt':
raise ValueError(
"The function '{0}' is valid in OGIP, but not understood "
"by astropy.units.".format(
p[1]))
if len(p) == 7:
if p1_str == 'sqrt':
p[0] = p[1] * p[3] ** (0.5 * p[6])
else:
p[0] = p[1] * p[3] ** p[6]
elif len(p) == 6:
p[0] = p[2] ** p[5]
elif len(p) == 5:
if p1_str == 'sqrt':
p[0] = p[3] ** 0.5
else:
p[0] = p[1] * p[3]
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_scale_factor(p):
'''
scale_factor : LIT10 power numeric_power
| LIT10
| signed_float
| signed_float power numeric_power
| signed_int power numeric_power
'''
if len(p) == 4:
p[0] = 10 ** p[3]
else:
p[0] = p[1]
# Can't use np.log10 here, because p[0] may be an
# arbitrary-precision Python int.
if math.log10(p[0]) % 1.0 != 0.0:
from astropy.units.core import UnitsWarning
warnings.warn(
"'{0}' scale should be a power of 10 in "
"OGIP format".format(p[0]), UnitsWarning)
def p_division(p):
'''
division : DIVISION
| WHITESPACE DIVISION
| WHITESPACE DIVISION WHITESPACE
| DIVISION WHITESPACE
'''
p[0] = 'DIVISION'
def p_product(p):
'''
product : WHITESPACE
| STAR
| WHITESPACE STAR
| WHITESPACE STAR WHITESPACE
| STAR WHITESPACE
'''
p[0] = 'PRODUCT'
def p_power(p):
'''
power : STARSTAR
'''
p[0] = 'POWER'
def p_unit(p):
'''
unit : UNIT
| UNIT power numeric_power
'''
if len(p) == 4:
p[0] = p[1] ** p[3]
else:
p[0] = p[1]
def p_numeric_power(p):
'''
numeric_power : UINT
| signed_float
| OPEN_PAREN signed_int CLOSE_PAREN
| OPEN_PAREN signed_float CLOSE_PAREN
| OPEN_PAREN signed_float division UINT CLOSE_PAREN
'''
if len(p) == 6:
p[0] = Fraction(int(p[2]), int(p[4]))
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'ogip_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='ogip_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('ogip_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, '{1}': {2}".format(
t.lexpos, t.value, str(e)))
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{0}' not supported by the OGIP "
"standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'OGIP',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
s = s.strip()
try:
# This is a short circuit for the case where the string is
# just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return core.Unit(
cls._parser.parse(s, lexer=cls._lexer, debug=debug))
except ValueError as e:
if str(e):
raise
else:
raise ValueError(
"Syntax error parsing unit '{0}'".format(s))
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('ogip')
cls._validate_unit(name)
return name
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append('{0}**({1})'.format(
cls._get_unit_name(base), power))
else:
out.append('{0}**{1}'.format(
cls._get_unit_name(base), power))
return ' '.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
# Can't use np.log10 here, because unit.scale may be an
# arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
warnings.warn(
"'{0}' scale should be a power of 10 in "
"OGIP format".format(
unit.scale),
core.UnitsWarning)
return generic._to_string(cls, unit)
@classmethod
def _to_decomposed_alternative(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
# Can't use np.log10 here, because unit.scale may be an
# arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
generic._to_string(cls, unit), scale)
return generic._to_string(cls, unit)
|
25ffa7c1dd6a0dc703d713fa43bf9c01ac0ec70a03b5e6c49f9ea93d4fed56ea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities shared by the different formats.
"""
import warnings
from fractions import Fraction
from astropy.utils.misc import did_you_mean
def get_grouped_by_powers(bases, powers):
"""
Groups the powers and bases in the given
`~astropy.units.CompositeUnit` into positive powers and
negative powers for easy display on either side of a solidus.
Parameters
----------
bases : list of `astropy.units.UnitBase` instances
powers : list of ints
Returns
-------
positives, negatives : tuple of lists
Each element in each list is tuple of the form (*base*,
*power*). The negatives have the sign of their power reversed
(i.e. the powers are all positive).
"""
positive = []
negative = []
for base, power in zip(bases, powers):
if power < 0:
negative.append((base, -power))
elif power > 0:
positive.append((base, power))
else:
raise ValueError("Unit with 0 power")
return positive, negative
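# Worked example (sketch): for bases [u.m, u.s] with powers [1, -2],
# this returns ([(u.m, 1)], [(u.s, 2)]) -- the negative power comes
# back sign-flipped so both lists can be rendered with positive
# exponents on either side of a solidus.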
def split_mantissa_exponent(v, format_spec=".8g"):
"""
Given a number, split it into its mantissa and base 10 exponent
parts, each as strings. If the exponent is too small, it may be
returned as the empty string.
Parameters
----------
v : float
format_spec : str, optional
Number representation formatting string
Returns
-------
mantissa, exponent : tuple of strings
"""
x = format(v, format_spec).split('e')
if x[0] != '1.' + '0' * (len(x[0]) - 2):
m = x[0]
else:
m = ''
if len(x) == 2:
ex = x[1].lstrip("0+")
if len(ex) > 0 and ex[0] == '-':
ex = '-' + ex[1:].lstrip('0')
else:
ex = ''
return m, ex
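# Worked example (sketch): format(3.0e8, '.8g') is '3e+08', so
# split_mantissa_exponent(3.0e8) returns ('3', '8'); for 300.0 the
# formatted value '300' contains no 'e', so the exponent comes back
# as the empty string: ('300', '').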
def decompose_to_known_units(unit, func):
"""
Partially decomposes a unit so it is only composed of units that
are "known" to a given format.
Parameters
----------
unit : `~astropy.units.UnitBase` instance
func : callable
This function will be called to determine if a given unit is
"known". If the unit is not known, this function should raise a
`ValueError`.
Returns
-------
unit : `~astropy.units.UnitBase` instance
A flattened unit.
"""
from astropy.units import core
if isinstance(unit, core.CompositeUnit):
new_unit = core.Unit(unit.scale)
for base, power in zip(unit.bases, unit.powers):
new_unit = new_unit * decompose_to_known_units(base, func) ** power
return new_unit
elif isinstance(unit, core.NamedUnit):
try:
func(unit)
except ValueError:
if isinstance(unit, core.Unit):
return decompose_to_known_units(unit._represents, func)
raise
return unit
def format_power(power):
"""
Converts a value for a power (which may be floating point or a
`fractions.Fraction` object), into a string either looking like
an integer or a fraction.
"""
if not isinstance(power, Fraction):
if power % 1.0 != 0.0:
frac = Fraction.from_float(power)
power = frac.limit_denominator(10)
if power.denominator == 1:
power = int(power.numerator)
else:
power = int(power)
return str(power)
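# Worked examples (sketch): format_power(2.0) -> '2';
# format_power(0.5) -> '1/2' via Fraction.limit_denominator(10);
# a power already given as Fraction(3, 2) -> '3/2'.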
def _try_decomposed(unit, format_decomposed):
represents = getattr(unit, '_represents', None)
if represents is not None:
try:
represents_string = format_decomposed(represents)
except ValueError:
pass
else:
return represents_string
decomposed = unit.decompose()
if decomposed is not unit:
try:
decompose_string = format_decomposed(decomposed)
except ValueError:
pass
else:
return decompose_string
return None
def did_you_mean_units(s, all_units, deprecated_units, format_decomposed):
"""
A wrapper around `astropy.utils.misc.did_you_mean` that deals with
the display of deprecated units.
Parameters
----------
s : str
The invalid unit string
all_units : dict
A mapping from valid unit names to unit objects.
deprecated_units : sequence
The deprecated unit names
format_decomposed : callable
A function to turn a decomposed version of the unit into a
string. Should return `None` if not possible
Returns
-------
msg : str
A string message with a list of alternatives, or the empty
string.
"""
def fix_deprecated(x):
if x in deprecated_units:
results = [x + ' (deprecated)']
decomposed = _try_decomposed(
all_units[x], format_decomposed)
if decomposed is not None:
results.append(decomposed)
return results
return (x,)
return did_you_mean(s, all_units, fix=fix_deprecated)
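# Behaviour sketch (with hypothetical unit names): a near-miss such as
# 'Angstroem' would suggest 'Angstrom' when that name is in all_units;
# a match that is deprecated is listed as e.g. 'G (deprecated)',
# followed by a decomposed alternative when one can be formatted.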
def unit_deprecation_warning(s, unit, standard_name, format_decomposed):
"""
Raises a UnitsWarning about a deprecated unit in a given format.
Suggests a decomposed alternative if one is available.
Parameters
----------
s : str
The deprecated unit name.
unit : astropy.units.core.UnitBase
The unit object.
standard_name : str
The name of the format for which the unit is deprecated.
format_decomposed : callable
A function to turn a decomposed version of the unit into a
string. Should return `None` if not possible
"""
from astropy.units.core import UnitsWarning
message = "The unit '{0}' has been deprecated in the {1} standard.".format(
s, standard_name)
decomposed = _try_decomposed(unit, format_decomposed)
if decomposed is not None:
message += " Suggested: {0}.".format(decomposed)
warnings.warn(message, UnitsWarning)
|
8cee085a4b66fbadb1ef6bfa490584aab955d4551fe3a69e3a17454236072141 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
import numpy as np
import copy
import keyword
import operator
from . import core, generic, utils
class Fits(generic.Generic):
"""
The FITS standard unit format.
This supports the format defined in the Units section of the `FITS
Standard <https://fits.gsfc.nasa.gov/fits_standard.html>`_.
"""
name = 'fits'
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
# Note about deprecated units: before v2.0, several units were treated
# as deprecated (G, barn, erg, Angstrom, angstrom). However, in the
# FITS 3.0 standard, these units are explicitly listed in the allowed
# units, but deprecated in the IAU Style Manual (McNally 1988). So
# after discussion (https://github.com/astropy/astropy/issues/2933),
# these units have been removed from the lists of deprecated units and
# bases.
bases = [
'm', 'g', 's', 'rad', 'sr', 'K', 'A', 'mol', 'cd',
'Hz', 'J', 'W', 'V', 'N', 'Pa', 'C', 'Ohm', 'S',
'F', 'Wb', 'T', 'H', 'lm', 'lx', 'a', 'yr', 'eV',
'pc', 'Jy', 'mag', 'R', 'bit', 'byte', 'G', 'barn'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
special_cases = {'dbyte': u.Unit('dbyte', 0.1*u.byte)}
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
elif key in special_cases:
names[key] = special_cases[key]
else:
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'deg', 'arcmin', 'arcsec', 'mas', 'min', 'h', 'd', 'Ry',
'solMass', 'u', 'solLum', 'solRad', 'AU', 'lyr', 'count',
'ct', 'photon', 'ph', 'pixel', 'pix', 'D', 'Sun', 'chan',
'bin', 'voxel', 'adu', 'beam', 'erg', 'Angstrom', 'angstrom'
]
deprecated_units = []
for unit in simple_units + deprecated_units:
names[unit] = getattr(u, unit)
for unit in deprecated_units:
deprecated_names.add(unit)
return names, deprecated_names, []
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{0}' not supported by the FITS standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'FITS',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('fits')
cls._validate_unit(name)
return name
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
parts = []
if isinstance(unit, core.CompositeUnit):
base = np.log10(unit.scale)
if base % 1.0 != 0.0:
raise core.UnitScaleError(
"The FITS unit format is not able to represent scales "
"that are not powers of 10. Multiply your data by "
"{0:e}.".format(unit.scale))
elif unit.scale != 1.0:
parts.append('10**{0}'.format(int(base)))
pairs = list(zip(unit.bases, unit.powers))
if len(pairs):
pairs.sort(key=operator.itemgetter(1), reverse=True)
parts.append(cls._format_unit_list(pairs))
s = ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
cls.to_string(unit), scale)
return s
@classmethod
def parse(cls, s, debug=False):
result = super().parse(s, debug)
if hasattr(result, 'function_unit'):
raise ValueError("Function units are not yet supported for "
"FITS units.")
return result
|
29c8f4e8bb87734f38133c4a5f25ec95d6b57ce325d223eac11f9c2edee88e2b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import os
import re
import warnings
from . import core, utils
from .base import Base
from astropy.utils import classproperty
from astropy.utils.misc import did_you_mean
def _to_string(cls, unit):
if isinstance(unit, core.CompositeUnit):
parts = []
if cls._show_scale and unit.scale != 1:
parts.append('{0:g}'.format(unit.scale))
if len(unit.bases):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(positives):
parts.append(cls._format_unit_list(positives))
elif len(parts) == 0:
parts.append('1')
if len(negatives):
parts.append('/')
unit_list = cls._format_unit_list(negatives)
if len(negatives) == 1:
parts.append('{0}'.format(unit_list))
else:
parts.append('({0})'.format(unit_list))
return ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
return cls._get_unit_name(unit)
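# Worked example (sketch): for 1 / u.s the positives list is empty, so
# parts becomes ['1', '/', 's'] and the result is '1 / s'; several
# negative powers get the parenthesized form instead, e.g.
# '1 / (m s2)'.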
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
_show_scale = True
_tokens = (
'DOUBLE_STAR',
'STAR',
'PERIOD',
'SOLIDUS',
'CARET',
'OPEN_PAREN',
'CLOSE_PAREN',
'FUNCNAME',
'UNIT',
'SIGN',
'UINT',
'UFLOAT'
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
from astropy.extern.ply import lex
tokens = cls._tokens
t_STAR = r'\*'
t_PERIOD = r'\.'
t_SOLIDUS = r'/'
t_DOUBLE_STAR = r'\*\*'
t_CARET = r'\^'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
        # Regular expression rules for the more complex tokens.
        # NOTE: the ordering of these rules is important -- ply tries
        # function-defined token rules in definition order, so t_UFLOAT
        # must come before t_UINT.
def t_UFLOAT(t):
r'((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
elif t.value.endswith('.'):
t.type = 'UINT'
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
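        # Examples of the disambiguation above: '42' -> UINT 42,
        # '3.' -> UINT 3, '1.5e3' -> UFLOAT 1500.0.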
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r'((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()'
return t
def t_UNIT(t):
r"%|([YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+')|((?!\d)\w)+"
t.value = cls._get_unit(t)
return t
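        # The UNIT pattern matches '%', an optionally SI-prefixed quoted
        # name such as k'foo', or a bare name that does not start with a
        # digit.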
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'generic_lextab.py'))
lexer = lex.lex(optimize=True, lextab='generic_lextab',
outputdir=os.path.dirname(__file__),
reflags=int(re.UNICODE))
if not lexer_exists:
cls._add_tab_header('generic_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
        Section 4.3, which is not terribly precise. The exact grammar
        used here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
from astropy.extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
'''
from astropy.units.core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
'''
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
'''
from astropy.units.core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
'''
inverse_unit : division unit_expression
'''
p[0] = p[2] ** -1
def p_factor(p):
'''
factor : factor_fits
| factor_float
| factor_int
'''
p[0] = p[1]
def p_factor_float(p):
'''
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
'''
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
'''
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power OPEN_PAREN UINT CLOSE_PAREN
| UINT power signed_int
| UINT power UINT
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
'''
if p[1] != 10:
if cls.name == 'fits':
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ('**', '^'):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
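        # Examples for the rule above: '10**3' and '10+3' evaluate to
        # 1000, while '10(-3)' and '10**(-3)' evaluate to 0.001.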
def p_product_of_units(p):
'''
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
'''
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
'''
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
'''
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
'''
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
'''
paren_expr : sign UINT
| signed_float
| frac
'''
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
'''
frac : sign UINT division sign UINT
'''
p[0] = (p[1] * p[2]) / (p[4] * p[5])
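        # This supports fractional powers such as the '3/2' in 'm(3/2)'.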
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_product(p):
'''
product : STAR
| PERIOD
'''
pass
def p_division(p):
'''
division : SOLIDUS
'''
pass
def p_power(p):
'''
power : DOUBLE_STAR
| CARET
'''
p[0] = p[1]
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_function_name(p):
'''
function_name : FUNCNAME
'''
p[0] = p[1]
def p_function(p):
'''
function : function_name OPEN_PAREN main CLOSE_PAREN
'''
if p[1] == 'sqrt':
p[0] = p[3] ** 0.5
return
elif p[1] in ('mag', 'dB', 'dex'):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError("'{0}' is not a recognized function".format(p[1]))
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'generic_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='generic_parsetab',
outputdir=os.path.dirname(__file__))
if not parser_exists:
cls._add_tab_header('generic_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, {1}".format(
t.lexpos, str(e)))
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s == '%':
return registry['percent']
elif s in registry:
return registry[s]
if detailed_exception:
raise ValueError(
'{0} is not a valid unit. {1}'.format(
s, did_you_mean(s, registry)))
else:
raise ValueError()
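    # For example, _parse_unit('%') returns the 'percent' unit, while an
    # unknown name raises ValueError with a did_you_mean suggestion.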
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, str):
s = s.decode('ascii')
result = cls._do_parse(s, debug=debug)
if s.count('/') > 1:
warnings.warn(
"'{0}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning)
return result
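    # Note that division in the grammar is left-associative, so a
    # discouraged string like 'm/s/kg' still parses (to m / (s kg)) but
    # triggers the warning above.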
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(
"Syntax error parsing unit '{0}'".format(s))
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('generic')
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append('{0}({1})'.format(
cls._get_unit_name(base), power))
else:
out.append('{0}{1}'.format(
cls._get_unit_name(base), power))
return ' '.join(out)
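    # For example, [(u.m, 2), (u.s, 1)] formats as 'm2 s', and a
    # fractional power such as (u.m, 1.5) formats as 'm(3/2)'.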
@classmethod
def to_string(cls, unit):
return _to_string(cls, unit)
class Unscaled(Generic):
"""
    A format that doesn't display the scale part of the unit; other
    than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
_show_scale = False
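    # For example, Unit(1000, 'm') renders as 'm' in this format, whereas
    # the Generic format would render it as '1000 m'.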