5da8386945a24efd81a0e2ae5b91f861365eb4b977bf3ea17142e1fb1314a793
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to do XML schema and DTD validation.  At the moment, this makes a
subprocess call to xmllint.  This could use a Python-based library at some
point in the future, if something appropriate could be found.
"""

import os
import subprocess


def validate_schema(filename, schema_file):
    """
    Validates an XML file against a schema or DTD.

    Parameters
    ----------
    filename : str
        The path to the XML file to validate

    schema_file : str
        The path to the XML schema or DTD

    Returns
    -------
    returncode, stdout, stderr : int, str, str
        Returns the returncode from xmllint and the stdout and stderr
        as strings
    """
    base, ext = os.path.splitext(schema_file)
    if ext == '.xsd':
        schema_part = '--schema ' + schema_file
    elif ext == '.dtd':
        schema_part = '--dtdvalid ' + schema_file
    else:
        raise TypeError("schema_file must be a path to an XML Schema or DTD")

    p = subprocess.Popen(
        "xmllint --noout --nonet {} {}".format(schema_part, filename),
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()

    if p.returncode == 127:
        raise OSError(
            "xmllint not found, so can not validate schema")
    elif p.returncode < 0:
        from ..misc import signal_number_to_name
        raise OSError(
            "xmllint was terminated by signal '{0}'".format(
                signal_number_to_name(-p.returncode)))

    return p.returncode, stdout, stderr
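# A minimal, hypothetical usage sketch of validate_schema above.  It assumes
# this module lives at astropy.utils.xml.validate, that an xmllint binary is
# on PATH, and uses made-up file names; stdout/stderr come back as bytes
# because subprocess pipes are used.
from astropy.utils.xml.validate import validate_schema

returncode, stdout, stderr = validate_schema('catalog.xml', 'catalog.xsd')
if returncode != 0:
    # xmllint writes validation problems to stderr
    print(stderr.decode('utf-8'))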
de0df7703512ff95c671f33c5a3a045ce4e41ab5cc7e923585055e432fda3138
# Licensed under a 3-clause BSD style license - see LICENSE.rst import json import os from datetime import datetime import locale import pytest import numpy as np from .. import data, misc def test_isiterable(): assert misc.isiterable(2) is False assert misc.isiterable([2]) is True assert misc.isiterable([1, 2, 3]) is True assert misc.isiterable(np.array(2)) is False assert misc.isiterable(np.array([1, 2, 3])) is True def test_signal_number_to_name_no_failure(): # Regression test for #5340: ensure signal_number_to_name throws no # AttributeError (it used ".iteritems()" which was removed in Python3). misc.signal_number_to_name(0) @pytest.mark.remote_data def test_api_lookup(): strurl = misc.find_api_page('astropy.utils.misc', 'dev', False, timeout=3) objurl = misc.find_api_page(misc, 'dev', False, timeout=3) assert strurl == objurl assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc' def test_skip_hidden(): path = data._find_pkg_data_path('data') for root, dirs, files in os.walk(path): assert '.hidden_file.txt' in files assert 'local.dat' in files # break after the first level since the data dir contains some other # subdirectories that don't have these files break for root, dirs, files in misc.walk_skip_hidden(path): assert '.hidden_file.txt' not in files assert 'local.dat' in files break def test_JsonCustomEncoder(): from ... import units as u assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]' assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]' assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]' assert json.dumps(b'hello world \xc3\x85', cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"' assert json.dumps({1: 2}, cls=misc.JsonCustomEncoder) == '{"1": 2}' # default assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}' # Quantities tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder) newd = json.loads(tmp) tmpd = {"a": {"unit": "cm", "value": 5.0}} assert newd == tmpd tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder) newd = json.loads(tmp2) tmpd = {"a": {"unit": "cm", "value": [0., 1.]}} assert newd == tmpd tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder) newd = json.loads(tmp3) tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}} assert newd == tmpd def test_inherit_docstrings(): class Base(metaclass=misc.InheritDocstrings): def __call__(self, *args): "FOO" pass class Subclass(Base): def __call__(self, *args): pass if Base.__call__.__doc__ is not None: # TODO: Maybe if __doc__ is None this test should be skipped instead? 
assert Subclass.__call__.__doc__ == "FOO" def test_set_locale(): # First, test if the required locales are available current = locale.setlocale(locale.LC_ALL) try: locale.setlocale(locale.LC_ALL, str('en_US')) locale.setlocale(locale.LC_ALL, str('de_DE')) except locale.Error as e: pytest.skip('Locale error: {}'.format(e)) finally: locale.setlocale(locale.LC_ALL, current) date = datetime(2000, 10, 1, 0, 0, 0) day_mon = date.strftime('%a, %b') with misc.set_locale('en_US'): assert date.strftime('%a, %b') == 'Sun, Oct' with misc.set_locale('de_DE'): assert date.strftime('%a, %b') == 'So, Okt' # Back to original assert date.strftime('%a, %b') == day_mon with misc.set_locale(current): assert date.strftime('%a, %b') == day_mon def test_check_broadcast(): assert misc.check_broadcast((10, 1), (3,)) == (10, 3) assert misc.check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3) with pytest.raises(ValueError): misc.check_broadcast((10, 2), (3,)) with pytest.raises(ValueError): misc.check_broadcast((10, 1), (3,), (4, 1, 2, 3)) def test_dtype_bytes_or_chars(): assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8 assert misc.dtype_bytes_or_chars(np.dtype(object)) is None assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4 assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5 assert misc.dtype_bytes_or_chars(np.array(u'12345').dtype) == 5
402236b02576e946e1b2c8d2541b84da07717212c5c506864e356072246e0b76
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Test `astropy.utils.timer`. .. note:: The tests only compare rough estimates as performance is machine-dependent. """ # STDLIB import time # THIRD-PARTY import pytest import numpy as np # LOCAL from ..timer import RunTimePredictor from ...modeling.fitting import ModelsError def func_to_time(x): """This sleeps for y seconds for use with timing tests. .. math:: y = 5 * x - 10 """ y = 5.0 * np.asarray(x) - 10 time.sleep(y) return y def test_timer(): """Test function timer.""" p = RunTimePredictor(func_to_time) # --- These must run before data points are introduced. --- with pytest.raises(ValueError): p.do_fit() with pytest.raises(RuntimeError): p.predict_time(100) # --- These must run next to set up data points. --- p.time_func([2.02, 2.04, 2.1, 'a', 2.3]) p.time_func(2.2) # Test OrderedDict assert p._funcname == 'func_to_time' assert p._cache_bad == ['a'] k = list(p.results.keys()) v = list(p.results.values()) np.testing.assert_array_equal(k, [2.02, 2.04, 2.1, 2.3, 2.2]) np.testing.assert_allclose(v, [0.1, 0.2, 0.5, 1.5, 1.0]) # --- These should only run once baseline is established. --- with pytest.raises(ModelsError): a = p.do_fit(model='foo') with pytest.raises(ModelsError): a = p.do_fit(fitter='foo') a = p.do_fit() assert p._power == 1 # Perfect slope is 5, with 10% uncertainty assert 4.5 <= a[1] <= 5.5 # Perfect intercept is -10, with 1-sec uncertainty assert -11 <= a[0] <= -9 # --- These should only run once fitting is completed. --- # Perfect answer is 490, with 10% uncertainty t = p.predict_time(100) assert 441 <= t <= 539 # Repeated call to access cached run time t2 = p.predict_time(100) assert t == t2
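# A condensed, hypothetical sketch of the RunTimePredictor workflow the test
# above exercises; the measured timings (and hence the fit) are
# machine-dependent, so no exact values are asserted here.
import time

from astropy.utils.timer import RunTimePredictor


def work(n):
    time.sleep(0.001 * n)
    return n


p = RunTimePredictor(work)
p.time_func([10, 20, 40, 80])   # collect sample run times for several inputs
fit_params = p.do_fit()         # fit run time as a function of the argument
print(p.predict_time(500))      # extrapolated run time in seconds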
3d591015e5a9b3dae5a5754c079705723faf9860ed12e5f5de581605918c8b02
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import pytest from ..xml import check, unescaper, writer def test_writer(): fh = io.StringIO() w = writer.XMLWriter(fh) with w.tag("html"): with w.tag("body"): w.data("This is the content") w.comment("comment") value = ''.join(fh.getvalue().split()) assert value == '<html><body>Thisisthecontent<!--comment--></body></html>' def test_check_id(): assert check.check_id("Fof32") assert check.check_id("_Fof32") assert not check.check_id("32Fof") def test_fix_id(): assert check.fix_id("Fof32") == "Fof32" assert check.fix_id("@#f") == "___f" def test_check_token(): assert check.check_token("token") assert not check.check_token("token\rtoken") def test_check_mime_content_type(): assert check.check_mime_content_type("image/jpeg") assert not check.check_mime_content_type("image") def test_check_anyuri(): assert check.check_anyuri("https://github.com/astropy/astropy") def test_unescape_all(): # str url_in = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \ 'DSACAT=IDR&amp;amp;DSATAB=Emitters&amp;amp;' url_out = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \ 'DSACAT=IDR&DSATAB=Emitters&' assert unescaper.unescape_all(url_in) == url_out # bytes url_in = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \ b'DSACAT=IDR&amp;amp;DSATAB=Emitters&amp;amp;' url_out = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \ b'DSACAT=IDR&DSATAB=Emitters&' assert unescaper.unescape_all(url_in) == url_out def test_escape_xml(): s = writer.xml_escape('This & That') assert type(s) == str assert s == 'This &amp; That' s = writer.xml_escape(1) assert type(s) == str assert s == '1' s = writer.xml_escape(b'This & That') assert type(s) == bytes assert s == b'This &amp; That' @pytest.mark.skipif('writer.HAS_BLEACH') def test_escape_xml_without_bleach(): fh = io.StringIO() w = writer.XMLWriter(fh) with pytest.raises(ValueError) as err: with w.xml_cleaning_method('bleach_clean'): pass assert 'bleach package is required when HTML escaping is disabled' in str(err) @pytest.mark.skipif('not writer.HAS_BLEACH') def test_escape_xml_with_bleach(): fh = io.StringIO() w = writer.XMLWriter(fh) # Turn off XML escaping, but still sanitize unsafe tags like <script> with w.xml_cleaning_method('bleach_clean'): w.start('td') w.data('<script>x</script> <em>OK</em>') w.end(indent=False) assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; <em>OK</em></td>\n' fh = io.StringIO() w = writer.XMLWriter(fh) # Default is True (all XML tags escaped) with w.xml_cleaning_method(): w.start('td') w.data('<script>x</script> <em>OK</em>') w.end(indent=False) assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; &lt;em&gt;OK&lt;/em&gt;</td>\n'
01644893af7dbaf5eefc41611ec5762f445693ff07daf58c1e3b68a300f28d89
import abc from collections import OrderedDict import pytest import numpy as np from ..metadata import MetaData, MergeConflictError, merge, enable_merge_strategies from ..metadata import common_dtype from ...utils import metadata from ...io import fits class OrderedDictSubclass(OrderedDict): pass class MetaBaseTest: __metaclass__ = abc.ABCMeta def test_none(self): d = self.test_class(*self.args) assert isinstance(d.meta, OrderedDict) assert len(d.meta) == 0 @pytest.mark.parametrize(('meta'), ([dict([('a', 1)]), OrderedDict([('a', 1)]), OrderedDictSubclass([('a', 1)])])) def test_mapping_init(self, meta): d = self.test_class(*self.args, meta=meta) assert type(d.meta) == type(meta) assert d.meta['a'] == 1 @pytest.mark.parametrize(('meta'), (["ceci n'est pas un meta", 1.2, [1, 2, 3]])) def test_non_mapping_init(self, meta): with pytest.raises(TypeError): self.test_class(*self.args, meta=meta) @pytest.mark.parametrize(('meta'), ([dict([('a', 1)]), OrderedDict([('a', 1)]), OrderedDictSubclass([('a', 1)])])) def test_mapping_set(self, meta): d = self.test_class(*self.args, meta=meta) assert type(d.meta) == type(meta) assert d.meta['a'] == 1 @pytest.mark.parametrize(('meta'), (["ceci n'est pas un meta", 1.2, [1, 2, 3]])) def test_non_mapping_set(self, meta): with pytest.raises(TypeError): d = self.test_class(*self.args, meta=meta) def test_meta_fits_header(self): header = fits.header.Header() header.set('observer', 'Edwin Hubble') header.set('exptime', '3600') d = self.test_class(*self.args, meta=header) assert d.meta['OBSERVER'] == 'Edwin Hubble' class ExampleData: meta = MetaData() def __init__(self, meta=None): self.meta = meta class TestMetaExampleData(MetaBaseTest): test_class = ExampleData args = () def test_metadata_merging_conflict_exception(): """Regression test for issue #3294. Ensure that an exception is raised when a metadata conflict exists and ``metadata_conflicts='error'`` has been set. 
""" data1 = ExampleData() data2 = ExampleData() data1.meta['somekey'] = {'x': 1, 'y': 1} data2.meta['somekey'] = {'x': 1, 'y': 999} with pytest.raises(MergeConflictError): merge(data1.meta, data2.meta, metadata_conflicts='error') def test_metadata_merging(): # Recursive merge meta1 = {'k1': {'k1': [1, 2], 'k2': 2}, 'k2': 2, 'k4': (1, 2)} meta2 = {'k1': {'k1': [3]}, 'k3': 3, 'k4': (3,)} out = merge(meta1, meta2, metadata_conflicts='error') assert out == {'k1': {'k2': 2, 'k1': [1, 2, 3]}, 'k2': 2, 'k3': 3, 'k4': (1, 2, 3)} # Merge two ndarrays meta1 = {'k1': np.array([1, 2])} meta2 = {'k1': np.array([3])} out = merge(meta1, meta2, metadata_conflicts='error') assert np.all(out['k1'] == np.array([1, 2, 3])) # Merge list and np.ndarray meta1 = {'k1': [1, 2]} meta2 = {'k1': np.array([3])} assert np.all(out['k1'] == np.array([1, 2, 3])) # Can't merge two scalar types meta1 = {'k1': 1} meta2 = {'k1': 2} with pytest.raises(MergeConflictError): merge(meta1, meta2, metadata_conflicts='error') # Conflicting shape meta1 = {'k1': np.array([1, 2])} meta2 = {'k1': np.array([[3]])} with pytest.raises(MergeConflictError): merge(meta1, meta2, metadata_conflicts='error') # Conflicting array type meta1 = {'k1': np.array([1, 2])} meta2 = {'k1': np.array(['3'])} with pytest.raises(MergeConflictError): merge(meta1, meta2, metadata_conflicts='error') # Conflicting array type with 'silent' merging meta1 = {'k1': np.array([1, 2])} meta2 = {'k1': np.array(['3'])} out = merge(meta1, meta2, metadata_conflicts='silent') assert np.all(out['k1'] == np.array(['3'])) def test_metadata_merging_new_strategy(): original_merge_strategies = list(metadata.MERGE_STRATEGIES) class MergeNumbersAsList(metadata.MergeStrategy): """ Scalar float or int values are joined in a list. """ types = ((int, float), (int, float)) @classmethod def merge(cls, left, right): return [left, right] class MergeConcatStrings(metadata.MergePlus): """ Scalar string values are concatenated """ types = (str, str) enabled = False # Normally can't merge two scalar types meta1 = {'k1': 1, 'k2': 'a'} meta2 = {'k1': 2, 'k2': 'b'} # Enable new merge strategy with enable_merge_strategies(MergeNumbersAsList, MergeConcatStrings): assert MergeNumbersAsList.enabled assert MergeConcatStrings.enabled out = merge(meta1, meta2, metadata_conflicts='error') assert out['k1'] == [1, 2] assert out['k2'] == 'ab' assert not MergeNumbersAsList.enabled assert not MergeConcatStrings.enabled # Confirm the default enabled=False behavior with pytest.raises(MergeConflictError): merge(meta1, meta2, metadata_conflicts='error') # Enable all MergeStrategy subclasses with enable_merge_strategies(metadata.MergeStrategy): assert MergeNumbersAsList.enabled assert MergeConcatStrings.enabled out = merge(meta1, meta2, metadata_conflicts='error') assert out['k1'] == [1, 2] assert out['k2'] == 'ab' assert not MergeNumbersAsList.enabled assert not MergeConcatStrings.enabled metadata.MERGE_STRATEGIES = original_merge_strategies def test_common_dtype_string(): u3 = np.array([u'123']) u4 = np.array([u'1234']) b3 = np.array([b'123']) b5 = np.array([b'12345']) assert common_dtype([u3, u4]).endswith('U4') assert common_dtype([b5, u4]).endswith('U5') assert common_dtype([b3, b5]).endswith('S5') def test_common_dtype_basic(): i8 = np.array(1, dtype=np.int64) f8 = np.array(1, dtype=np.float64) u3 = np.array(u'123') with pytest.raises(MergeConflictError): common_dtype([i8, u3]) assert common_dtype([i8, i8]).endswith('i8') assert common_dtype([i8, f8]).endswith('f8')
bc6377b1282df4f37e7538a6b3b4dabdca338b71f1e02a3aef549e29669bf04d
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from ..data_info import dtype_info_name

STRING_TYPE_NAMES = {(True, 'S'): 'bytes', (True, 'U'): 'str'}

DTYPE_TESTS = ((np.array(b'abcd').dtype, STRING_TYPE_NAMES[(True, 'S')] + '4'),
               (np.array(u'abcd').dtype, STRING_TYPE_NAMES[(True, 'U')] + '4'),
               ('S4', STRING_TYPE_NAMES[(True, 'S')] + '4'),
               ('U4', STRING_TYPE_NAMES[(True, 'U')] + '4'),
               (np.void, 'void'),
               (np.int32, 'int32'),
               (bool, 'bool'),
               (float, 'float64'),
               ('<f4', 'float32'),
               ('u8', 'uint64'),
               ('c16', 'complex128'),
               ('object', 'object'))


@pytest.mark.parametrize('input,output', DTYPE_TESTS)
def test_dtype_info_name(input, output):
    """
    Test that dtype_info_name is giving the expected output

    Here the available types::

      'b'       boolean
      'i'       (signed) integer
      'u'       unsigned integer
      'f'       floating-point
      'c'       complex-floating point
      'O'       (Python) objects
      'S', 'a'  (byte-)string
      'U'       Unicode
      'V'       raw data (void)
    """
    assert dtype_info_name(input) == output
bcc896e3bd486ffa337241dd4b417a8aa40bce4af373517a0b55fde00c40978a
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import hashlib import os import pathlib import sys import tempfile import urllib.request import urllib.error import pytest from ..data import (_get_download_cache_locs, CacheMissingWarning, get_pkg_data_filename, get_readable_fileobj) from ...tests.helper import raises, catch_warnings TESTURL = 'http://www.astropy.org' # General file object function try: import bz2 # noqa except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True try: import lzma # noqa except ImportError: HAS_XZ = False else: HAS_XZ = True @pytest.mark.remote_data('astropy') def test_download_nocache(): from ..data import download_file fnout = download_file(TESTURL) assert os.path.isfile(fnout) @pytest.mark.remote_data('astropy') def test_download_parallel(): import shelve from ..data import (download_file, download_files_in_parallel, _get_download_cache_locs, get_cached_urls) main_url = 'http://data.astropy.org/intersphinx/README' mirror_url = 'http://www.astropy.org/astropy-data/intersphinx/README' fnout = download_files_in_parallel([ 'http://data.astropy.org', main_url, mirror_url]) assert all([os.path.isfile(f) for f in fnout]), fnout # Now test that download_file looks in mirror's cache before download. # https://github.com/astropy/astropy/issues/6982 dldir, urlmapfn = _get_download_cache_locs() with shelve.open(urlmapfn) as url2hash: del url2hash[main_url] # NOTE: Cannot disable internet in a remote_data test, so comparing hash # should be good enough? # This test also tests for "assert TESTURL in get_cached_urls()". c_urls = get_cached_urls() assert ((download_file(main_url, cache=True) == download_file(mirror_url, cache=True)) and (mirror_url in c_urls) and (main_url not in c_urls)) @pytest.mark.remote_data('astropy') def test_download_noprogress(): from ..data import download_file fnout = download_file(TESTURL, show_progress=False) assert os.path.isfile(fnout) @pytest.mark.remote_data('astropy') def test_download_cache(): from ..data import download_file, clear_download_cache download_dir = _get_download_cache_locs()[0] # Download the test URL and make sure it exists, then clear just that # URL and make sure it got deleted. fnout = download_file(TESTURL, cache=True) assert os.path.isdir(download_dir) assert os.path.isfile(fnout) clear_download_cache(TESTURL) assert not os.path.exists(fnout) # Test issues raised in #4427 with clear_download_cache() without a URL, # followed by subsequent download. fnout = download_file(TESTURL, cache=True) assert os.path.isfile(fnout) clear_download_cache() assert not os.path.exists(fnout) assert not os.path.exists(download_dir) fnout = download_file(TESTURL, cache=True) assert os.path.isfile(fnout) # Clearing download cache succeeds even if the URL does not exist. clear_download_cache('http://this_was_never_downloaded_before.com') # Make sure lockdir was released lockdir = os.path.join(download_dir, 'lock') assert not os.path.isdir(lockdir), 'Cache dir lock was not released!' 
@pytest.mark.remote_data('astropy') def test_url_nocache(): from ..data import get_readable_fileobj with get_readable_fileobj(TESTURL, cache=False, encoding='utf-8') as page: assert page.read().find('Astropy') > -1 @pytest.mark.remote_data('astropy') def test_find_by_hash(): from ..data import get_readable_fileobj, get_pkg_data_filename, clear_download_cache with get_readable_fileobj(TESTURL, encoding="binary", cache=True) as page: hash = hashlib.md5(page.read()) hashstr = 'hash/' + hash.hexdigest() fnout = get_pkg_data_filename(hashstr) assert os.path.isfile(fnout) clear_download_cache(hashstr[5:]) assert not os.path.isfile(fnout) lockdir = os.path.join(_get_download_cache_locs()[0], 'lock') assert not os.path.isdir(lockdir), 'Cache dir lock was not released!' @pytest.mark.remote_data('astropy') def test_find_invalid(): from ..data import get_pkg_data_filename # this is of course not a real data file and not on any remote server, but # it should *try* to go to the remote server with pytest.raises(urllib.error.URLError): get_pkg_data_filename('kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli') # Package data functions @pytest.mark.parametrize(('filename'), ['local.dat', 'local.dat.gz', 'local.dat.bz2', 'local.dat.xz']) def test_local_data_obj(filename): from ..data import get_pkg_data_fileobj if (not HAS_BZ2 and 'bz2' in filename) or (not HAS_XZ and 'xz' in filename): with pytest.raises(ValueError) as e: with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f: f.readline() # assert f.read().rstrip() == b'CONTENT' assert ' format files are not supported' in str(e) else: with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f: f.readline() assert f.read().rstrip() == b'CONTENT' @pytest.fixture(params=['invalid.dat.bz2', 'invalid.dat.gz']) def bad_compressed(request, tmpdir): # These contents have valid headers for their respective file formats, but # are otherwise malformed and invalid. bz_content = b'BZhinvalid' gz_content = b'\x1f\x8b\x08invalid' datafile = tmpdir.join(request.param) filename = datafile.strpath if filename.endswith('.bz2'): contents = bz_content elif filename.endswith('.gz'): contents = gz_content else: contents = 'invalid' datafile.write(contents, mode='wb') return filename def test_local_data_obj_invalid(bad_compressed): is_bz2 = bad_compressed.endswith('.bz2') is_xz = bad_compressed.endswith('.xz') # Note, since these invalid files are created on the fly in order to avoid # problems with detection by antivirus software # (see https://github.com/astropy/astropy/issues/6520), it is no longer # possible to use ``get_pkg_data_fileobj`` to read the files. Technically, # they're not local anymore: they just live in a temporary directory # created by pytest. However, we can still use get_readable_fileobj for the # test. 
if (not HAS_BZ2 and is_bz2) or (not HAS_XZ and is_xz): with pytest.raises(ValueError) as e: with get_readable_fileobj(bad_compressed, encoding='binary') as f: f.read() assert ' format files are not supported' in str(e) else: with get_readable_fileobj(bad_compressed, encoding='binary') as f: assert f.read().rstrip().endswith(b'invalid') def test_local_data_name(): from ..data import get_pkg_data_filename fnout = get_pkg_data_filename('data/local.dat') assert os.path.isfile(fnout) and fnout.endswith('local.dat') # TODO: if in the future, the root data/ directory is added in, the below # test should be uncommented and the README.rst should be replaced with # whatever file is there # get something in the astropy root # fnout2 = get_pkg_data_filename('../../data/README.rst') # assert os.path.isfile(fnout2) and fnout2.endswith('README.rst') def test_data_name_third_party_package(): """Regression test for issue #1256 Tests that `get_pkg_data_filename` works in a third-party package that doesn't make any relative imports from the module it's used from. Uses a test package under ``data/test_package``. """ # Get the actual data dir: data_dir = os.path.join(os.path.dirname(__file__), 'data') sys.path.insert(0, data_dir) try: import test_package filename = test_package.get_data_filename() assert filename == os.path.join(data_dir, 'test_package', 'data', 'foo.txt') finally: sys.path.pop(0) @raises(RuntimeError) def test_local_data_nonlocalfail(): from ..data import get_pkg_data_filename # this would go *outside* the atropy tree get_pkg_data_filename('../../../data/README.rst') def test_compute_hash(tmpdir): from ..data import compute_hash rands = b'1234567890abcdefghijklmnopqrstuvwxyz' filename = tmpdir.join('tmp.dat').strpath with open(filename, 'wb') as ntf: ntf.write(rands) ntf.flush() chhash = compute_hash(filename) shash = hashlib.md5(rands).hexdigest() assert chhash == shash def test_get_pkg_data_contents(): from ..data import get_pkg_data_fileobj, get_pkg_data_contents with get_pkg_data_fileobj('data/local.dat') as f: contents1 = f.read() contents2 = get_pkg_data_contents('data/local.dat') assert contents1 == contents2 @pytest.mark.remote_data('astropy') def test_data_noastropy_fallback(monkeypatch): """ Tests to make sure the default behavior when the cache directory can't be located is correct """ from .. 
import data from ...config import paths # needed for testing the *real* lock at the end lockdir = os.path.join(_get_download_cache_locs()[0], 'lock') # better yet, set the configuration to make sure the temp files are deleted data.conf.delete_temporary_downloads_at_exit = True # make sure the config and cache directories are not searched monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo') monkeypatch.delenv(str('XDG_CONFIG_HOME')) monkeypatch.setenv(str('XDG_CACHE_HOME'), 'bar') monkeypatch.delenv(str('XDG_CACHE_HOME')) monkeypatch.setattr(paths.set_temp_config, '_temp_path', None) monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None) # make sure the _find_or_create_astropy_dir function fails as though the # astropy dir could not be accessed def osraiser(dirnm, linkto): raise OSError monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser) with pytest.raises(OSError): # make sure the config dir search fails paths.get_cache_dir() # first try with cache with catch_warnings(CacheMissingWarning) as w: fnout = data.download_file(TESTURL, cache=True) assert os.path.isfile(fnout) assert len(w) > 1 w1 = w.pop(0) w2 = w.pop(0) assert w1.category == CacheMissingWarning assert 'Remote data cache could not be accessed' in w1.message.args[0] assert w2.category == CacheMissingWarning assert 'File downloaded to temporary location' in w2.message.args[0] assert fnout == w2.message.args[1] # clearing the cache should be a no-up that doesn't affect fnout with catch_warnings(CacheMissingWarning) as w: data.clear_download_cache(TESTURL) assert os.path.isfile(fnout) # now remove it so tests don't clutter up the temp dir this should get # called at exit, anyway, but we do it here just to make sure it's working # correctly data._deltemps() assert not os.path.isfile(fnout) assert len(w) > 0 w3 = w.pop() assert w3.category == data.CacheMissingWarning assert 'Not clearing data cache - cache inacessable' in str(w3.message) # now try with no cache with catch_warnings(CacheMissingWarning) as w: fnnocache = data.download_file(TESTURL, cache=False) with open(fnnocache, 'rb') as page: assert page.read().decode('utf-8').find('Astropy') > -1 # no warnings should be raise in fileobj because cache is unnecessary assert len(w) == 0 # lockdir determined above as the *real* lockdir, not the temp one assert not os.path.isdir(lockdir), 'Cache dir lock was not released!' 
@pytest.mark.parametrize(('filename'), [ 'unicode.txt', 'unicode.txt.gz', pytest.param('unicode.txt.bz2', marks=pytest.mark.xfail(not HAS_BZ2, reason='no bz2 support')), pytest.param('unicode.txt.xz', marks=pytest.mark.xfail(not HAS_XZ, reason='no lzma support'))]) def test_read_unicode(filename): from ..data import get_pkg_data_contents contents = get_pkg_data_contents(os.path.join('data', filename), encoding='utf-8') assert isinstance(contents, str) contents = contents.splitlines()[1] assert contents == "האסטרונומי פייתון" contents = get_pkg_data_contents(os.path.join('data', filename), encoding='binary') assert isinstance(contents, bytes) x = contents.splitlines()[1] assert x == (b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0" b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]) def test_compressed_stream(): import base64 from ..data import get_readable_fileobj gzipped_data = (b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ" b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA==") gzipped_data = base64.b64decode(gzipped_data) assert isinstance(gzipped_data, bytes) class FakeStream: """ A fake stream that has `read`, but no `seek`. """ def __init__(self, data): self.data = data def read(self, nbytes=None): if nbytes is None: result = self.data self.data = b'' else: result = self.data[:nbytes] self.data = self.data[nbytes:] return result stream = FakeStream(gzipped_data) with get_readable_fileobj(stream, encoding='binary') as f: f.readline() assert f.read().rstrip() == b'CONTENT' @pytest.mark.remote_data('astropy') def test_invalid_location_download(): """ checks that download_file gives a URLError and not an AttributeError, as its code pathway involves some fiddling with the exception. """ from ..data import download_file with pytest.raises(urllib.error.URLError): download_file('http://www.astropy.org/nonexistentfile') def test_invalid_location_download_noconnect(): """ checks that download_file gives an OSError if the socket is blocked """ from ..data import download_file # This should invoke socket's monkeypatched failure with pytest.raises(OSError): download_file('http://astropy.org/nonexistentfile') @pytest.mark.remote_data('astropy') def test_is_url_in_cache(): from ..data import download_file, is_url_in_cache assert not is_url_in_cache('http://astropy.org/nonexistentfile') download_file(TESTURL, cache=True, show_progress=False) assert is_url_in_cache(TESTURL) def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch): """checks that get_readable_fileobj leaves no temporary files behind""" # Create a 'file://' URL pointing to a path on the local filesystem local_filename = get_pkg_data_filename(os.path.join('data', 'local.dat')) url = 'file://' + urllib.request.pathname2url(local_filename) # Save temporary files to a known location monkeypatch.setattr(tempfile, 'tempdir', str(tmpdir)) # Call get_readable_fileobj() as a context manager with get_readable_fileobj(url): pass # Get listing of files in temporary directory tempdir_listing = tmpdir.listdir() # Assert that the temporary file was empty after get_readable_fileobj() # context manager finished running assert len(tempdir_listing) == 0 def test_path_objects_get_readable_fileobj(): fpath = pathlib.Path(get_pkg_data_filename(os.path.join('data', 'local.dat'))) with get_readable_fileobj(fpath) as f: assert f.read().rstrip() == ('This file is used in the test_local_data_* ' 'testing functions\nCONTENT')
edeced5fd15917a6190108ea7a6706f8f7918fdfb127c94b1b02a014d4ec351c
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import io import numpy as np import pytest from . import test_progress_bar_func from .. import console from ... import units as u class FakeTTY(io.StringIO): """IOStream that fakes a TTY; provide an encoding to emulate an output stream with a specific encoding. """ def __new__(cls, encoding=None): # Return a new subclass of FakeTTY with the requested encoding if encoding is None: return super().__new__(cls) encoding = encoding cls = type(encoding.title() + cls.__name__, (cls,), {'encoding': encoding}) return cls.__new__(cls) def __init__(self, encoding=None): super().__init__() def write(self, s): if isinstance(s, bytes): # Just allow this case to work s = s.decode('latin-1') elif self.encoding is not None: s.encode(self.encoding) return super().write(s) def isatty(self): return True def test_fake_tty(): # First test without a specified encoding; we should be able to write # arbitrary unicode strings f1 = FakeTTY() assert f1.isatty() f1.write('☃') assert f1.getvalue() == '☃' # Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when # trying to write a string containing non-ASCII characters f2 = FakeTTY('ascii') assert f2.isatty() assert f2.__class__.__name__ == 'AsciiFakeTTY' assert pytest.raises(UnicodeEncodeError, f2.write, '☃') assert f2.getvalue() == '' @pytest.mark.skipif(str("sys.platform.startswith('win')")) def test_color_text(): assert console._color_text("foo", "green") == '\033[0;32mfoo\033[0m' def test_color_print(): # This stuff is hard to test, at least smoke test it console.color_print("foo", "green") console.color_print("foo", "green", "bar", "red") def test_color_print2(): # Test that this automatically detects that io.StringIO is # not a tty stream = io.StringIO() console.color_print("foo", "green", file=stream) assert stream.getvalue() == 'foo\n' stream = io.StringIO() console.color_print("foo", "green", "bar", "red", "baz", file=stream) assert stream.getvalue() == 'foobarbaz\n' @pytest.mark.skipif(str("sys.platform.startswith('win')")) def test_color_print3(): # Test that this thinks the FakeTTY is a tty and applies colors. stream = FakeTTY() console.color_print("foo", "green", file=stream) assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\n' stream = FakeTTY() console.color_print("foo", "green", "bar", "red", "baz", file=stream) assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\x1b[0;31mbar\x1b[0mbaz\n' def test_color_print_unicode(): console.color_print("überbær", "red") def test_color_print_invalid_color(): console.color_print("foo", "unknown") def test_spinner_non_unicode_console(): """Regression test for #1760 Ensures that the spinner can fall go into fallback mode when using the unicode spinner on a terminal whose default encoding cannot encode the unicode characters. 
""" stream = FakeTTY('ascii') chars = console.Spinner._default_unicode_chars with console.Spinner("Reticulating splines", file=stream, chars=chars) as s: next(s) def test_progress_bar(): # This stuff is hard to test, at least smoke test it with console.ProgressBar(50) as bar: for i in range(50): bar.update() def test_progress_bar2(): for x in console.ProgressBar(range(50)): pass def test_progress_bar3(): def do_nothing(*args, **kwargs): pass console.ProgressBar.map(do_nothing, range(50)) def test_zero_progress_bar(): with console.ProgressBar(0) as bar: pass def test_progress_bar_as_generator(): sum = 0 for x in console.ProgressBar(range(50)): sum += x assert sum == 1225 sum = 0 for x in console.ProgressBar(50): sum += x assert sum == 1225 def test_progress_bar_map(): items = list(range(100)) result = console.ProgressBar.map(test_progress_bar_func.func, items, step=10, multiprocess=True) assert items == result @pytest.mark.parametrize(("seconds", "string"), [(864088, " 1w 3d"), (187213, " 2d 4h"), (3905, " 1h 5m"), (64, " 1m 4s"), (15, " 15s"), (2, " 2s")] ) def test_human_time(seconds, string): human_time = console.human_time(seconds) assert human_time == string @pytest.mark.parametrize(("size", "string"), [(8640882, "8.6M"), (187213, "187k"), (3905, "3.9k"), (64, " 64 "), (2, " 2 "), (10*u.GB, " 10G")] ) def test_human_file_size(size, string): human_time = console.human_file_size(size) assert human_time == string @pytest.mark.parametrize("size", (50*u.km, 100*u.g)) def test_bad_human_file_size(size): assert pytest.raises(u.UnitConversionError, console.human_file_size, size)
2372528dea8718b8637b5a661940aa02a0513848b5205e3fc19b14be01092d1f
# Licensed under a 3-clause BSD style license - see LICENSE.rst # namedtuple is needed for find_mod_objs so it can have a non-local module from collections import namedtuple import pytest from .. import introspection from ..introspection import (find_current_module, find_mod_objs, isinstancemethod, minversion) def test_pkg_finder(): """ Tests that the `find_current_module` function works. Note that this also implicitly tests compat.misc._patched_getmodule """ mod1 = 'astropy.utils.introspection' mod2 = 'astropy.utils.tests.test_introspection' mod3 = 'astropy.utils.tests.test_introspection' assert find_current_module(0).__name__ == mod1 assert find_current_module(1).__name__ == mod2 assert find_current_module(0, True).__name__ == mod3 def test_find_current_mod(): from sys import getrecursionlimit thismodnm = __name__ assert find_current_module(0) is introspection assert find_current_module(1).__name__ == thismodnm assert find_current_module(getrecursionlimit() + 1) is None assert find_current_module(0, True).__name__ == thismodnm assert find_current_module(0, [introspection]).__name__ == thismodnm assert find_current_module(0, ['astropy.utils.introspection']).__name__ == thismodnm with pytest.raises(ImportError): find_current_module(0, ['faddfdsasewrweriopunjlfiurrhujnkflgwhu']) def test_find_mod_objs(): lnms, fqns, objs = find_mod_objs('astropy') # this import is after the above call intentionally to make sure # find_mod_objs properly imports astropy on its own import astropy # just check for astropy.test ... other things might be added, so we # shouldn't check that it's the only thing assert 'test' in lnms assert astropy.test in objs lnms, fqns, objs = find_mod_objs(__name__, onlylocals=False) assert 'namedtuple' in lnms assert 'collections.namedtuple' in fqns assert namedtuple in objs lnms, fqns, objs = find_mod_objs(__name__, onlylocals=True) assert 'namedtuple' not in lnms assert 'collections.namedtuple' not in fqns assert namedtuple not in objs def _minversion_test(): from types import ModuleType test_module = ModuleType(str("test_module")) test_module.__version__ = '0.12.2' good_versions = ['0.12', '0.12.1', '0.12.0.dev'] bad_versions = ['1', '1.2rc1'] for version in good_versions: assert minversion(test_module, version) for version in bad_versions: assert not minversion(test_module, version) def test_minversion(): import sys if 'pkg_resources' in sys.modules: pkg_resources_saved = sys.modules['pkg_resources'] # Force ImportError for pkg_resources in minversion() sys.modules['pkg_resource'] = None _minversion_test() sys.modules['pkg_resource'] = pkg_resources_saved _minversion_test()
84eddb76e1fc796259ca122888297806d6fc0db836665f1a561b7f50646d0d0b
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

from ...tests.helper import raises
from .. import collections


@raises(TypeError)
def test_homogeneous_list():
    l = collections.HomogeneousList(int)
    l.append(5.0)


@raises(TypeError)
def test_homogeneous_list2():
    l = collections.HomogeneousList(int)
    l.extend([5.0])


def test_homogeneous_list3():
    l = collections.HomogeneousList(int)
    l.append(5)
    assert l == [5]


def test_homogeneous_list4():
    l = collections.HomogeneousList(int)
    l.extend([5])
    assert l == [5]


@raises(TypeError)
def test_homogeneous_list5():
    l = collections.HomogeneousList(int, [1, 2, 3])
    l[1] = 5.0


def test_homogeneous_list_setitem_works():
    l = collections.HomogeneousList(int, [1, 2, 3])
    l[1] = 5
    assert l == [1, 5, 3]


def test_homogeneous_list_setitem_works_with_slice():
    l = collections.HomogeneousList(int, [1, 2, 3])
    l[0:1] = [10, 20, 30]
    assert l == [10, 20, 30, 2, 3]

    l[:] = [5, 4, 3]
    assert l == [5, 4, 3]

    l[::2] = [2, 1]
    assert l == [2, 4, 1]


def test_homogeneous_list_init_got_invalid_type():
    with pytest.raises(TypeError):
        collections.HomogeneousList(int, [1, 2., 3])


def test_homogeneous_list_works_with_generators():
    hl = collections.HomogeneousList(int, (i for i in range(3)))
    assert hl == [0, 1, 2]

    hl = collections.HomogeneousList(int)
    hl.extend(i for i in range(3))
    assert hl == [0, 1, 2]

    hl = collections.HomogeneousList(int)
    hl[0:1] = (i for i in range(3))
    assert hl == [0, 1, 2]

    hl = collections.HomogeneousList(int)
    hl += (i for i in range(3))
    assert hl == [0, 1, 2]
1c80498c2f1f985b73e7e1d0af6a73b2fb1d91958d0d5b956766f71908b736ae
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import inspect import pickle import pytest from ..decorators import (deprecated_attribute, deprecated, wraps, sharedmethod, classproperty, format_doc, deprecated_renamed_argument) from ..exceptions import AstropyDeprecationWarning, AstropyUserWarning from ...tests.helper import catch_warnings def test_wraps(): """ Tests the compatibility replacement for functools.wraps which supports argument preservation across all supported Python versions. """ def foo(a, b, c=1, d=2, e=3, **kwargs): """A test function.""" return a, b, c, d, e, kwargs @wraps(foo) def bar(*args, **kwargs): return ('test',) + foo(*args, **kwargs) expected = ('test', 1, 2, 3, 4, 5, {'f': 6, 'g': 7}) assert bar(1, 2, 3, 4, 5, f=6, g=7) == expected assert bar.__name__ == 'foo' if foo.__doc__ is not None: # May happen if using optimized opcode assert bar.__doc__ == "A test function." if hasattr(foo, '__qualname__'): assert bar.__qualname__ == foo.__qualname__ argspec = inspect.getfullargspec(bar) assert argspec.varkw == 'kwargs' assert argspec.args == ['a', 'b', 'c', 'd', 'e'] assert argspec.defaults == (1, 2, 3) def test_wraps_exclude_names(): """ Test the optional ``exclude_names`` argument to the wraps decorator. """ # This particular test demonstrates wrapping an instance method # as a function and excluding the "self" argument: class TestClass: def method(self, a, b, c=1, d=2, **kwargs): return (a, b, c, d, kwargs) test = TestClass() @wraps(test.method, exclude_args=('self',)) def func(*args, **kwargs): return test.method(*args, **kwargs) argspec = inspect.getfullargspec(func) assert argspec.args == ['a', 'b', 'c', 'd'] assert func('a', 'b', e=3) == ('a', 'b', 1, 2, {'e': 3}) def test_wraps_keep_orig_name(): """ Test that when __name__ is excluded from the ``assigned`` argument to ``wrap`` that the function being wrapped keeps its original name. Regression test for https://github.com/astropy/astropy/pull/4016 """ def foo(): pass assigned = list(functools.WRAPPER_ASSIGNMENTS) assigned.remove('__name__') def bar(): pass orig_bar = bar bar = wraps(foo, assigned=assigned)(bar) assert bar is not orig_bar assert bar.__name__ == 'bar' def test_deprecated_attribute(): class DummyClass: def __init__(self): self._foo = 42 def set_private(self): self._foo = 100 foo = deprecated_attribute('foo', '0.2') dummy = DummyClass() with catch_warnings(AstropyDeprecationWarning) as w: x = dummy.foo assert len(w) == 1 assert str(w[0].message) == ("The foo attribute is deprecated and may be " "removed in a future version.") with catch_warnings() as w: dummy.set_private() assert len(w) == 0 # This needs to be defined outside of the test function, because we # want to try to pickle it. @deprecated('100.0') class TA: """ This is the class docstring. """ def __init__(self): """ This is the __init__ docstring """ pass class TMeta(type): metaclass_attr = 1 @deprecated('100.0') class TB(metaclass=TMeta): pass def test_deprecated_class(): orig_A = TA.__bases__[0] # The only thing that should be different about the new class # is __doc__, __init__, __bases__ and __subclasshook__. # and __init_subclass__ for Python 3.6+. 
for x in dir(orig_A): if x not in ('__doc__', '__init__', '__bases__', '__dict__', '__subclasshook__', '__init_subclass__'): assert getattr(TA, x) == getattr(orig_A, x) with catch_warnings(AstropyDeprecationWarning) as w: TA() assert len(w) == 1 if TA.__doc__ is not None: assert 'function' not in TA.__doc__ assert 'deprecated' in TA.__doc__ assert 'function' not in TA.__init__.__doc__ assert 'deprecated' in TA.__init__.__doc__ # Make sure the object is picklable pickle.dumps(TA) def test_deprecated_class_with_new_method(): """ Test that a class with __new__ method still works even if it accepts additional arguments. This previously failed because the deprecated decorator would wrap objects __init__ which takes no arguments. """ @deprecated('1.0') class A: def __new__(cls, a): return super().__new__(cls) # Creating an instance should work but raise a DeprecationWarning with catch_warnings(AstropyDeprecationWarning) as w: A(1) assert len(w) == 1 @deprecated('1.0') class B: def __new__(cls, a): return super().__new__(cls) def __init__(self, a): pass # Creating an instance should work but raise a DeprecationWarning with catch_warnings(AstropyDeprecationWarning) as w: B(1) assert len(w) == 1 def test_deprecated_class_with_super(): """ Regression test for an issue where classes that used `super()` in their ``__init__`` did not actually call the correct class's ``__init__`` in the MRO. """ @deprecated('100.0') class TB: def __init__(self, a, b): super().__init__() with catch_warnings(AstropyDeprecationWarning) as w: TB(1, 2) assert len(w) == 1 if TB.__doc__ is not None: assert 'function' not in TB.__doc__ assert 'deprecated' in TB.__doc__ assert 'function' not in TB.__init__.__doc__ assert 'deprecated' in TB.__init__.__doc__ def test_deprecated_class_with_custom_metaclass(): """ Regression test for an issue where deprecating a class with a metaclass other than type did not restore the metaclass properly. """ with catch_warnings(AstropyDeprecationWarning) as w: TB() assert len(w) == 1 assert type(TB) is TMeta assert TB.metaclass_attr == 1 def test_deprecated_static_and_classmethod(): """ Regression test for issue introduced by https://github.com/astropy/astropy/pull/2811 and mentioned also here: https://github.com/astropy/astropy/pull/2580#issuecomment-51049969 where it appears that deprecated staticmethods didn't work on Python 2.6. """ class A: """Docstring""" @deprecated('1.0') @staticmethod def B(): pass @deprecated('1.0') @classmethod def C(cls): pass with catch_warnings(AstropyDeprecationWarning) as w: A.B() assert len(w) == 1 if A.__doc__ is not None: assert 'deprecated' in A.B.__doc__ with catch_warnings(AstropyDeprecationWarning) as w: A.C() assert len(w) == 1 if A.__doc__ is not None: assert 'deprecated' in A.C.__doc__ def test_deprecated_argument(): # Tests the decorator with function, method, staticmethod and classmethod. 
class Test: @classmethod @deprecated_renamed_argument('clobber', 'overwrite', '1.3') def test1(cls, overwrite): return overwrite @staticmethod @deprecated_renamed_argument('clobber', 'overwrite', '1.3') def test2(overwrite): return overwrite @deprecated_renamed_argument('clobber', 'overwrite', '1.3') def test3(self, overwrite): return overwrite @deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=False) def test1(overwrite): return overwrite for method in [Test().test1, Test().test2, Test().test3, test1]: # As positional argument only assert method(1) == 1 # As new keyword argument assert method(overwrite=1) == 1 # Using the deprecated name with catch_warnings(AstropyDeprecationWarning) as w: assert method(clobber=1) == 1 assert len(w) == 1 assert '1.3' in str(w[0].message) assert 'test_decorators.py' in str(w[0].filename) # Using both. Both keyword with pytest.raises(TypeError): method(clobber=2, overwrite=1) # One positional, one keyword with pytest.raises(TypeError): method(1, clobber=2) def test_deprecated_argument_in_kwargs(): # To rename an argument that is consumed by "kwargs" the "arg_in_kwargs" # parameter is used. @deprecated_renamed_argument('clobber', 'overwrite', '1.3', arg_in_kwargs=True) def test(**kwargs): return kwargs['overwrite'] # As positional argument only with pytest.raises(TypeError): test(1) # As new keyword argument assert test(overwrite=1) == 1 # Using the deprecated name with catch_warnings(AstropyDeprecationWarning) as w: assert test(clobber=1) == 1 assert len(w) == 1 assert '1.3' in str(w[0].message) assert 'test_decorators.py' in str(w[0].filename) # Using both. Both keyword with pytest.raises(TypeError): test(clobber=2, overwrite=1) # One positional, one keyword with pytest.raises(TypeError): test(1, clobber=2) def test_deprecated_argument_relaxed(): # Relax turns the TypeError if both old and new keyword are used into # a warning. @deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=True) def test(overwrite): return overwrite # As positional argument only assert test(1) == 1 # As new keyword argument assert test(overwrite=1) == 1 # Using the deprecated name with catch_warnings(AstropyDeprecationWarning) as w: assert test(clobber=1) == 1 assert len(w) == 1 assert '1.3' in str(w[0].message) # Using both. Both keyword with catch_warnings(AstropyUserWarning) as w: assert test(clobber=2, overwrite=1) == 1 assert len(w) == 1 # One positional, one keyword with catch_warnings(AstropyUserWarning) as w: assert test(1, clobber=2) == 1 assert len(w) == 1 def test_deprecated_argument_pending(): # Relax turns the TypeError if both old and new keyword are used into # a warning. @deprecated_renamed_argument('clobber', 'overwrite', '1.3', pending=True) def test(overwrite): return overwrite # As positional argument only assert test(1) == 1 # As new keyword argument assert test(overwrite=1) == 1 # Using the deprecated name with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w: assert test(clobber=1) == 1 assert len(w) == 0 # Using both. 
Both keyword with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w: assert test(clobber=2, overwrite=1) == 1 assert len(w) == 0 # One positional, one keyword with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w: assert test(1, clobber=2) == 1 assert len(w) == 0 def test_deprecated_argument_multi_deprecation(): @deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'], [1.3, 1.2, 1.3], relax=True) def test(a, b, c): return a, b, c with catch_warnings(AstropyDeprecationWarning) as w: assert test(x=1, y=2, z=3) == (1, 2, 3) assert len(w) == 3 # Make sure relax is valid for all arguments with catch_warnings(AstropyUserWarning) as w: assert test(x=1, y=2, z=3, b=3) == (1, 3, 3) assert len(w) == 1 with catch_warnings(AstropyUserWarning) as w: assert test(x=1, y=2, z=3, a=3) == (3, 2, 3) assert len(w) == 1 with catch_warnings(AstropyUserWarning) as w: assert test(x=1, y=2, z=3, c=5) == (1, 2, 5) assert len(w) == 1 def test_deprecated_argument_multi_deprecation_2(): @deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'], [1.3, 1.2, 1.3], relax=[True, True, False]) def test(a, b, c): return a, b, c with catch_warnings(AstropyUserWarning) as w: assert test(x=1, y=2, z=3, b=3) == (1, 3, 3) assert len(w) == 1 with catch_warnings(AstropyUserWarning) as w: assert test(x=1, y=2, z=3, a=3) == (3, 2, 3) assert len(w) == 1 with pytest.raises(TypeError): assert test(x=1, y=2, z=3, c=5) == (1, 2, 5) def test_deprecated_argument_not_allowed_use(): # If the argument is supposed to be inside the kwargs one needs to set the # arg_in_kwargs parameter. Without it it raises a TypeError. with pytest.raises(TypeError): @deprecated_renamed_argument('clobber', 'overwrite', '1.3') def test1(**kwargs): return kwargs['overwrite'] # Cannot replace "*args". with pytest.raises(TypeError): @deprecated_renamed_argument('overwrite', 'args', '1.3') def test2(*args): return args # Cannot replace "**kwargs". with pytest.raises(TypeError): @deprecated_renamed_argument('overwrite', 'kwargs', '1.3') def test3(**kwargs): return kwargs def test_sharedmethod_reuse_on_subclasses(): """ Regression test for an issue where sharedmethod would bind to one class for all time, causing the same method not to work properly on other subclasses of that class. It has the same problem when the same sharedmethod is called on different instances of some class as well. """ class AMeta(type): def foo(cls): return cls.x class A: x = 3 def __init__(self, x): self.x = x @sharedmethod def foo(self): return self.x a1 = A(1) a2 = A(2) assert a1.foo() == 1 assert a2.foo() == 2 # Similar test now, but for multiple subclasses using the same sharedmethod # as a classmethod assert A.foo() == 3 class B(A): x = 5 assert B.foo() == 5 def test_classproperty_docstring(): """ Tests that the docstring is set correctly on classproperties. This failed previously due to a bug in Python that didn't always set __doc__ properly on instances of property subclasses. """ class A: # Inherits docstring from getter @classproperty def foo(cls): """The foo.""" return 1 assert A.__dict__['foo'].__doc__ == "The foo." class B: # Use doc passed to classproperty constructor def _get_foo(cls): return 1 foo = classproperty(_get_foo, doc="The foo.") assert B.__dict__['foo'].__doc__ == "The foo." 
def test_format_doc_stringInput_simple(): # Simple tests with string input docstring_fail = '' # Raises an valueerror if input is empty with pytest.raises(ValueError): @format_doc(docstring_fail) def testfunc_fail(): pass docstring = 'test' # A first test that replaces an empty docstring @format_doc(docstring) def testfunc_1(): pass assert inspect.getdoc(testfunc_1) == docstring # Test that it replaces an existing docstring @format_doc(docstring) def testfunc_2(): '''not test''' pass assert inspect.getdoc(testfunc_2) == docstring def test_format_doc_stringInput_format(): # Tests with string input and formatting docstring = 'yes {0} no {opt}' # Raises an indexerror if not given the formatted args and kwargs with pytest.raises(IndexError): @format_doc(docstring) def testfunc1(): pass # Test that the formatting is done right @format_doc(docstring, '/', opt='= life') def testfunc2(): pass assert inspect.getdoc(testfunc2) == 'yes / no = life' # Test that we can include the original docstring docstring2 = 'yes {0} no {__doc__}' @format_doc(docstring2, '/') def testfunc3(): '''= 2 / 2 * life''' pass assert inspect.getdoc(testfunc3) == 'yes / no = 2 / 2 * life' def test_format_doc_objectInput_simple(): # Simple tests with object input def docstring_fail(): pass # Self input while the function has no docstring raises an error with pytest.raises(ValueError): @format_doc(docstring_fail) def testfunc_fail(): pass def docstring0(): '''test''' pass # A first test that replaces an empty docstring @format_doc(docstring0) def testfunc_1(): pass assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0) # Test that it replaces an existing docstring @format_doc(docstring0) def testfunc_2(): '''not test''' pass assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0) def test_format_doc_objectInput_format(): # Tests with object input and formatting def docstring(): '''test {0} test {opt}''' pass # Raises an indexerror if not given the formatted args and kwargs with pytest.raises(IndexError): @format_doc(docstring) def testfunc_fail(): pass # Test that the formatting is done right @format_doc(docstring, '+', opt='= 2 * test') def testfunc2(): pass assert inspect.getdoc(testfunc2) == 'test + test = 2 * test' # Test that we can include the original docstring def docstring2(): '''test {0} test {__doc__}''' pass @format_doc(docstring2, '+') def testfunc3(): '''= 4 / 2 * test''' pass assert inspect.getdoc(testfunc3) == 'test + test = 4 / 2 * test' def test_format_doc_selfInput_simple(): # Simple tests with self input # Self input while the function has no docstring raises an error with pytest.raises(ValueError): @format_doc(None) def testfunc_fail(): pass # Test that it keeps an existing docstring @format_doc(None) def testfunc_1(): '''not test''' pass assert inspect.getdoc(testfunc_1) == 'not test' def test_format_doc_selfInput_format(): # Tests with string input which is '__doc__' (special case) and formatting # Raises an indexerror if not given the formatted args and kwargs with pytest.raises(IndexError): @format_doc(None) def testfunc_fail(): '''dum {0} dum {opt}''' pass # Test that the formatting is done right @format_doc(None, 'di', opt='da dum') def testfunc1(): '''dum {0} dum {opt}''' pass assert inspect.getdoc(testfunc1) == 'dum di dum da dum' # Test that we cannot recursively insert the original documentation @format_doc(None, 'di') def testfunc2(): '''dum {0} dum {__doc__}''' pass assert inspect.getdoc(testfunc2) == 'dum di dum ' def test_format_doc_onMethod(): # Check if the decorator works 
on methods too, to spice it up we try double # decorator docstring = 'what we do {__doc__}' class TestClass: @format_doc(docstring) @format_doc(None, 'strange.') def test_method(self): '''is {0}''' pass assert inspect.getdoc(TestClass.test_method) == 'what we do is strange.' def test_format_doc_onClass(): # Check if the decorator works on classes too docstring = 'what we do {__doc__} {0}{opt}' @format_doc(docstring, 'strange', opt='.') class TestClass: '''is''' pass assert inspect.getdoc(TestClass) == 'what we do is strange.'
307ed3b52a1b5e6eb541c169d3d83a91b599be8453c95552314e5bea5920283f
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import sys
import traceback

import pytest

from ..codegen import make_function_with_signature


def test_make_function_with_signature_lineno():
    """
    Tests that a function made with ``make_function_with_signature`` is given
    the correct line number into the module it was created from (i.e. the
    line the ``make_function_with_signature`` call was made from).
    """

    def crashy_function(*args, **kwargs):
        1 / 0

    # Make a wrapper around this function with the signature:
    # crashy_function(a, b)
    # Note: the signature is not really relevant to this test
    wrapped = make_function_with_signature(crashy_function, ('a', 'b'))
    line = """
    wrapped = make_function_with_signature(crashy_function, ('a', 'b'))
    """.strip()

    try:
        wrapped(1, 2)
    except Exception:
        exc_cls, exc, tb = sys.exc_info()
        assert exc_cls is ZeroDivisionError
        # The *last* line in the traceback should be the 1 / 0 line in
        # crashy_function; the next line up should be the line that the
        # make_function_with_signature call was on
        tb_lines = traceback.format_tb(tb)
        assert '1 / 0' in tb_lines[-1]
        assert line in tb_lines[-2] and 'line =' not in tb_lines[-2]
    else:
        pytest.fail('This should have caused an exception')
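# A small, hypothetical sketch of make_function_with_signature outside the
# traceback test: wrap a generic callable so it exposes an explicit (a, b)
# signature while forwarding the supplied values.
import inspect

from astropy.utils.codegen import make_function_with_signature


def total(*args, **kwargs):
    return sum(args) + sum(kwargs.values())


add_ab = make_function_with_signature(total, ('a', 'b'), name='add_ab')
print(inspect.signature(add_ab))   # (a, b)
print(add_ab(1, 2))                # 3, however the wrapper forwards a and b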
0f4d8d8733b97a22c9ddf4fb8600d593d2ffc8f9693af7fccf2fa850772cf985
import time

import numpy as np

from ..misc import NumpyRNGContext


def func(i):
    """An identity function that jitters its execution time by a
    pseudo-random amount.

    FIXME: This function should be defined in test_console.py, but Astropy's
    `python setup.py test` interacts strangely with Python's
    `multiprocessing` module.  I was getting a mysterious PicklingError until
    I moved this function into a separate module.  (It worked fine in a
    standalone pytest script.)"""

    with NumpyRNGContext(i):
        time.sleep(np.random.uniform(0, 0.01))

    return i
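

# --- Added illustrative sketch (not part of the original helper module) ---
# NumpyRNGContext temporarily seeds NumPy's global random state, so the
# jitter above is reproducible for a given ``i`` and the surrounding RNG
# state is restored afterwards.
def _rng_context_example():
    with NumpyRNGContext(42):
        first = np.random.uniform(0, 0.01)
    with NumpyRNGContext(42):
        second = np.random.uniform(0, 0.01)
    assert first == second  # same seed -> identical draw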
043c1552ba82ab3619ce313dbc1f9aa24e089fa8ac2ec49b81631fe08c148402
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains utility modules for compatibility with
older/newer versions of python, as well as including some bugfixes for the
stdlib that are important for Astropy.

Note that all public functions in the `astropy.utils.compat.misc` module
are imported here for easier access.
"""

from .misc import *

# Importing this module will also install monkey-patches defined in it
from .numpycompat import *
c10c02006bbf70bb2c953642956a7852d5b00c90d49d6def1f92c8721075af42
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion

__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_11_2',
           'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14']

# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_11_2 = not minversion('numpy', '1.11.2')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14dev')
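
# --- Added illustrative check (not part of the original module) ---
# ``minversion`` compares the installed package version against a minimum,
# so each flag above is True only when the installed NumPy is older than
# the stated release.
_example_numpy_is_old = not minversion('numpy', '1.13')
assert _example_numpy_is_old == NUMPY_LT_1_13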
0f46911008cacafedce12ab4e3ec2db28be80dcd8412a6074992a413828fad13
from inspect import signature, Parameter, Signature, BoundArguments

__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']

import warnings
from ..exceptions import AstropyDeprecationWarning

warnings.warn("astropy.utils.compat.funcsigs is now deprecated - "
              "use inspect instead", AstropyDeprecationWarning)
2fce7b711e53d84bb081dc8b2db17854706e7b4afff5e73a663431ab29713e2e
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python.  This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
"""

import sys
import functools

from contextlib import suppress
from importlib import invalidate_caches

__all__ = ['invalidate_caches', 'override__dir__', 'suppress',
           'possible_filename', 'namedtuple_asdict']


def possible_filename(filename):
    """
    Determine if the ``filename`` argument is an allowable type for a
    filename.

    In Python 3.3 use of non-unicode filenames on system calls such as
    `os.stat` and others that accept a filename argument was deprecated (and
    may be removed outright in the future).  Therefore this returns `True` in
    all cases except for `bytes` strings on Windows.
    """

    if isinstance(filename, str):
        return True
    elif isinstance(filename, bytes):
        return not (sys.platform == 'win32')

    return False


def override__dir__(f):
    """
    When overriding a __dir__ method on an object, you often want to
    include the "standard" members on the object as well.  This decorator
    takes care of that automatically, and all the wrapped function needs to
    do is return a list of the "special" members that wouldn't be found by
    the normal Python means.

    Example
    -------

    @override__dir__
    def __dir__(self):
        return ['special_method1', 'special_method2']
    """
    # http://bugs.python.org/issue12166
    @functools.wraps(f)
    def override__dir__wrapper(self):
        members = set(object.__dir__(self))
        members.update(f(self))
        return sorted(members)

    return override__dir__wrapper


def namedtuple_asdict(namedtuple):
    """
    The same as ``namedtuple._asdict()``.

    Parameters
    ----------
    namedtuple : collections.namedtuple
        The named tuple to get the dict of
    """
    return namedtuple._asdict()
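

# --- Added usage sketch (not part of the original module) ---
# ``namedtuple_asdict`` simply defers to the instance's ``_asdict()``:
def _namedtuple_asdict_example():
    from collections import namedtuple as _namedtuple
    Point = _namedtuple('Point', ['x', 'y'])
    assert dict(namedtuple_asdict(Point(1, 2))) == {'x': 1, 'y': 2}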
15958d99cb0ed42d960b61ab43eea48595f7429ec0e1dc2f2ae475f1029d1f01
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import urllib.request import pytest import numpy as np from ....tests.helper import assert_quantity_allclose, catch_warnings from .. import iers from .... import units as u from ....table import QTable from ....time import Time FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError) try: iers.IERS_A.open('finals2000A.all') # check if IERS_A is available except OSError: HAS_IERS_A = False else: HAS_IERS_A = True IERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt') class TestBasic(): """Basic tests that IERS_B returns correct values""" def test_simple(self): iers.IERS.close() assert iers.IERS.iers_table is None iers_tab = iers.IERS.open() assert iers.IERS.iers_table is not None assert isinstance(iers.IERS.iers_table, QTable) assert iers_tab['UT1_UTC'].unit is u.second assert iers_tab['PM_x'].unit is u.arcsecond assert iers_tab['PM_y'].unit is u.arcsecond jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5]) jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5]) ut1_utc = iers_tab.ut1_utc(jd1, jd2) assert isinstance(ut1_utc, u.Quantity) assert ut1_utc.unit is u.second # IERS files change at the 0.1 ms level; see gh-6981 assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s, atol=0.1*u.ms) # should be future-proof; surely we've moved to another planet by then with pytest.raises(IndexError): ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.) # also check it returns the right status ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True) assert np.all(status2 == iers.FROM_IERS_B) ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True) assert status4 == iers.TIME_BEYOND_IERS_RANGE # check it works via Time too t = Time(jd1, jd2, format='jd', scale='utc') ut1_utc3 = iers_tab.ut1_utc(t) assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s, atol=0.1*u.ms) # Table behaves properly as a table (e.g. can be sliced) assert len(iers_tab[:2]) == 2 def test_open_filename(self): iers.IERS.close() iers.IERS.open(iers.IERS_B_FILE) assert iers.IERS.iers_table is not None assert isinstance(iers.IERS.iers_table, QTable) iers.IERS.close() with pytest.raises(FILE_NOT_FOUND_ERROR): iers.IERS.open('surely this does not exist') def test_open_network_url(self): iers.IERS_A.close() iers.IERS_A.open("file:" + urllib.request.pathname2url(IERS_A_EXCERPT)) assert iers.IERS_A.iers_table is not None assert isinstance(iers.IERS_A.iers_table, QTable) iers.IERS_A.close() class TestIERS_AExcerpt(): def test_simple(self): # Test the IERS A reader. It is also a regression tests that ensures # values do not get overridden by IERS B; see #4933. 
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT) assert iers_tab['UT1_UTC'].unit is u.second assert 'P' in iers_tab['UT1Flag'] assert 'I' in iers_tab['UT1Flag'] assert 'B' in iers_tab['UT1Flag'] assert np.all((iers_tab['UT1Flag'] == 'I') | (iers_tab['UT1Flag'] == 'P') | (iers_tab['UT1Flag'] == 'B')) assert iers_tab['dX_2000A'].unit is u.marcsec assert iers_tab['dY_2000A'].unit is u.marcsec assert 'P' in iers_tab['NutFlag'] assert 'I' in iers_tab['NutFlag'] assert 'B' in iers_tab['NutFlag'] assert np.all((iers_tab['NutFlag'] == 'P') | (iers_tab['NutFlag'] == 'I') | (iers_tab['NutFlag'] == 'B')) assert iers_tab['PM_x'].unit is u.arcsecond assert iers_tab['PM_y'].unit is u.arcsecond assert 'P' in iers_tab['PolPMFlag'] assert 'I' in iers_tab['PolPMFlag'] assert 'B' in iers_tab['PolPMFlag'] assert np.all((iers_tab['PolPMFlag'] == 'P') | (iers_tab['PolPMFlag'] == 'I') | (iers_tab['PolPMFlag'] == 'B')) t = Time([57053., 57054., 57055.], format='mjd') ut1_utc, status = iers_tab.ut1_utc(t, return_status=True) assert status[0] == iers.FROM_IERS_B assert np.all(status[1:] == iers.FROM_IERS_A) # These values are *exactly* as given in the table, so they should # match to double precision accuracy. assert_quantity_allclose(ut1_utc, [-0.4916557, -0.4925323, -0.4934373] * u.s, atol=0.1*u.ms) dcip_x,dcip_y, status = iers_tab.dcip_xy(t, return_status=True) assert status[0] == iers.FROM_IERS_B assert np.all(status[1:] == iers.FROM_IERS_A) # These values are *exactly* as given in the table, so they should # match to double precision accuracy. print(dcip_x) print(dcip_y) assert_quantity_allclose(dcip_x, [-0.086, -0.093, -0.087] * u.marcsec, atol=1.*u.narcsec) assert_quantity_allclose(dcip_y, [0.094, 0.081, 0.072] * u.marcsec, atol=1*u.narcsec) pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True) assert status[0] == iers.FROM_IERS_B assert np.all(status[1:] == iers.FROM_IERS_A) assert_quantity_allclose(pm_x, [0.003734, 0.004581, 0.004623] * u.arcsec, atol=0.1*u.marcsec) assert_quantity_allclose(pm_y, [0.310824, 0.313150, 0.315517] * u.arcsec, atol=0.1*u.marcsec) # Table behaves properly as a table (e.g. can be sliced) assert len(iers_tab[:2]) == 2 @pytest.mark.skipif(str('not HAS_IERS_A')) class TestIERS_A(): def test_simple(self): """Test that open() by default reads a 'finals2000A.all' file.""" # Ensure we remove any cached table (gh-5131). iers.IERS_A.close() iers_tab = iers.IERS_A.open() jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5]) jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5]) ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True) assert np.all(status == iers.FROM_IERS_B) assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s, atol=0.1*u.ms) ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True) assert status2 == iers.TIME_BEYOND_IERS_RANGE tnow = Time.now() ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True) assert status3 == iers.FROM_IERS_A_PREDICTION assert ut1_utc3 != 0. 
class TestIERS_Auto(): @pytest.mark.remote_data def test_no_auto_download(self): with iers.conf.set_temp('auto_download', False): t = iers.IERS_Auto.open() assert type(t) is iers.IERS_B @pytest.mark.remote_data def test_simple(self): iers_a_file_1 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-02-30-test') iers_a_file_2 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-04-30-test') iers_a_url_1 = os.path.normpath('file://' + os.path.abspath(iers_a_file_1)) iers_a_url_2 = os.path.normpath('file://' + os.path.abspath(iers_a_file_2)) with iers.conf.set_temp('iers_auto_url', iers_a_url_1): dat = iers.IERS_Auto.open() assert dat['MJD'][0] == 57359.0 * u.d assert dat['MJD'][-1] == 57539.0 * u.d # Pretend we are accessing at a time 7 days after start of predictive data predictive_mjd = dat.meta['predictive_mjd'] dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d # Look at times before and after the test file begins. 0.1292905 is # the IERS-B value from MJD=57359. The value in # finals2000A-2016-02-30-test has been replaced at this point. assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905) assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227) # Now pretend we are accessing at time 60 days after start of predictive data. # There will be a warning when downloading the file doesn't give new data # and an exception when extrapolating into the future with insufficient data. dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905) with catch_warnings(iers.IERSStaleWarning) as warns: with pytest.raises(ValueError) as err: dat.ut1_utc(Time(60000, format='mjd').jd) assert 'interpolating from IERS_Auto using predictive values' in str(err) assert len(warns) == 1 assert 'IERS_Auto predictive values are older' in str(warns[0].message) # Warning only if we are getting return status with catch_warnings(iers.IERSStaleWarning) as warns: dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True) assert len(warns) == 1 assert 'IERS_Auto predictive values are older' in str(warns[0].message) # Now set auto_max_age = None which says that we don't care how old the # available IERS-A file is. There should be no warnings or exceptions. with iers.conf.set_temp('auto_max_age', None): with catch_warnings(iers.IERSStaleWarning) as warns: dat.ut1_utc(Time(60000, format='mjd').jd) assert not warns # Now point to a later file with same values but MJD increased by # 60 days and see that things work. dat._time_now is still the same value # as before, i.e. right around the start of predictive values for the new file. # (In other words this is like downloading the latest file online right now). with iers.conf.set_temp('iers_auto_url', iers_a_url_2): # Look at times before and after the test file begins. This forces a new download. assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905) assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3) # Now the time range should be different. assert dat['MJD'][0] == 57359.0 * u.d assert dat['MJD'][-1] == (57539.0 + 60) * u.d
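

# --- Added usage sketch (assumption-level, not part of the original tests) ---
# The pattern the tests above exercise, outside of pytest: open the bundled
# IERS-B table and interpolate UT1-UTC for an arbitrary time.
def _iers_usage_example():
    iers_b = iers.IERS_B.open()              # bundled table, no download
    t = Time('2012-06-30T12:00:00', scale='utc')
    dut1 = iers_b.ut1_utc(t)                 # astropy Quantity in seconds
    assert dut1.unit is u.second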
29149f397014797e1f6fb8f3ea56ed05297a6ceec410bd77f2afd2baf552bc09
# Licensed under a 3-clause BSD style license - see LICENSE.rst # LOCAL from ....utils.xml.iterparser import _fast_iterparse # SYSTEM import io import zlib # The C-based XML parser for VOTables previously used fixed-sized # buffers (allocated at __init__() time). This test will # only pass with the patch that allows a dynamic realloc() of # the queue. This addresses the bugs: # # - "RuntimeError: XML queue overflow" # https://github.com/astropy/astropy/issues/5824 # (Kudos to Stefan Becker---ARI/ZAH Heidelberg) # # - "iterparse.c: add queue_realloc() + move 'buffersize / 2' logic there" # https://github.com/astropy/astropy/issues/5869 # # This test code can emulate a combination of network buffering and # gzip decompression---with different request sizes, it can be used to # demonstrate both under-reading and over-reading. # # Using the 512-tag VOTABLE XML sample input, and various combinations # of minimum/maximum fetch sizes, the following situations can be # generated: # # maximum_fetch = 1 (ValueError, no element found) still within gzip headers # maximum_fetch = 80 (ValueError, unclosed token) short read # maximum_fetch =217 passes, because decompressed_length > requested # && <512 tags in a single parse # maximum_fetch =218 (RuntimeError, XML queue overflow) # # The test provided here covers the over-reading identified in #5824 # (equivalent to the 217). # Firstly, assemble a minimal VOTABLE header, table contents and footer. # This is done in textual form, as the aim is to only test the parser, not # the outputter! HEADER = """<?xml version="1.0" encoding="UTF-8"?> <VOTABLE> <RESOURCE type="results"> <TABLE> <FIELD ID="foo" name="foo" datatype="int" arraysize="1"/> <DATA> <TABLEDATA> """ ROW = """<TR><TD>0</TD></TR> """ FOOTER = """ </TABLEDATA> </DATA> </TABLE> </RESOURCE> </VOTABLE> """ # minimum passable buffer size => 1024 # 1024 / 2 => 512 tags for overflow # 512 - 7 tags in header, - 5 tags in footer = 500 tags required for overflow # 500 / 4 tags (<tr><td></td></tr>) per row == 125 rows required for overflow VOTABLE_XML = HEADER + 125*ROW + FOOTER # UngzipFileWrapper() wraps an existing file-like Object, # decompressing the content and returning the plaintext. # This therefore emulates the behaviour of the Python 'requests' # library when transparently decompressing Gzip HTTP responses. # # The critical behaviour is that---because of the # decompression---read() can return considerably more # bytes than were requested! (But, read() can also return less). # # inspiration: # http://stackoverflow.com/questions/4013843/how-to-wrap-file-object-read-and-write-operation-which-are-readonly class UngzipFileWrapper: def __init__(self, fd, **kwargs): self._file = fd self._z = zlib.decompressobj(16 + zlib.MAX_WBITS) def read(self, requested_length): # emulate network buffering dynamics by clamping the read size clamped_length = max(1, min(1 << 24, requested_length)) compressed = self._file.read(clamped_length) plaintext = self._z.decompress(compressed) # Only for real local files---just for the testcase if len(compressed) == 0: self.close() return plaintext def __getattr__(self, attr): return getattr(self._file, attr) # test_iterparser_over_read_simple() is a very cut down test, # of the original more flexible test-case, but without external # dependencies. The plaintext is compressed and then decompressed # to provide a better emulation of the original situation where # the bug was observed. 
#
# If a dependency upon 'zlib' is not desired, it would be possible to
# simplify this testcase by replacing the compress/decompress with a
# read() method emulation that always returned more from a buffer than
# was requested.
def test_iterparser_over_read_simple():
    # Take the plaintext of 512 tags, and compress it with a
    # Gzip-style header (+16), to most closely emulate the behaviour
    # of most HTTP servers.
    zlib_GZIP_STYLE_HEADER = 16
    compo = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED,
                             zlib.MAX_WBITS + zlib_GZIP_STYLE_HEADER)

    # Bytes vs. String .encode()/.decode() for compatibility with Python 3.5.
    s = compo.compress(VOTABLE_XML.encode())
    s = s + compo.flush()

    fd = io.BytesIO(s)
    fd.seek(0)

    # Finally setup the test of the C-based '_fast_iterparse()' iterator
    # and a situation in which it can be called a-la the VOTable Parser.
    MINIMUM_REQUESTABLE_BUFFER_SIZE = 1024
    uncompressed_fd = UngzipFileWrapper(fd)
    iterable = _fast_iterparse(uncompressed_fd.read,
                               MINIMUM_REQUESTABLE_BUFFER_SIZE)
    list(iterable)
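

# --- Added side note (illustrative, not part of the original test) ---
# The wbits arithmetic used above: compressing with MAX_WBITS + 16 emits a
# gzip-style header, and decompressobj(16 + MAX_WBITS) strips it again.
def _gzip_wbits_example():
    payload = b'<VOTABLE/>'
    compressor = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED,
                                  zlib.MAX_WBITS + 16)
    blob = compressor.compress(payload) + compressor.flush()
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    assert decompressor.decompress(blob) == payload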
b3545f1e532fb2740834cde771c87d8e343eadf9b47df9d67f0146903739738c
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""NumPy functions and classes needed for astropy but not available
in all supported NumPy versions.

See docs/utils/numpy.rst for details.
"""

from .lib.stride_tricks import broadcast_arrays, broadcast_to
from .core.multiarray import matmul
ed7a113b6f9de55be5dde4e81218177d84e84b2982a70e942b8dfbf114f68979
from concurrent.futures import *

import warnings
from ...exceptions import AstropyDeprecationWarning

warnings.warn("astropy.utils.compat.futures is now deprecated - "
              "use concurrent.futures instead", AstropyDeprecationWarning)
b1c73f993bd5360dfb3d8c45d3ed4f773b70ee51cc7018081ca15b31e23d70d8
# coding: utf-8
# Licensed like numpy; see licenses/NUMPY_LICENSE.rst

import warnings

import numpy as np
from numpy import matmul as np_matmul

from ....exceptions import AstropyDeprecationWarning

__all__ = ['matmul', 'GE1P10']


def GE1P10(module=np):
    return hasattr(module, 'matmul')


def matmul(*args, **kwargs):
    warnings.warn(
        'This function is deprecated, as it is available in all NumPy versions '
        'that this version of Astropy supports. You should use '
        'numpy.matmul directly.', AstropyDeprecationWarning)
    return np_matmul(*args, **kwargs)
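

# --- Added replacement example (not part of the original module) ---
# The recommended direct call, which this shim merely forwards to:
def _matmul_example():
    result = np.matmul(np.eye(2), np.array([[1.0], [2.0]]))
    assert result.shape == (2, 1)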
3f2cbc22765df5702db95f2b18d71d849c7f348cfc27e983218a9f8081399d40
# coding: utf-8
# Licensed like the corresponding numpy file; see licenses/NUMPY_LICENSE.rst
"""
Utilities that manipulate strides to achieve desirable effects.

An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""

import warnings

import numpy as np
from numpy.lib.stride_tricks import (
    broadcast_arrays as np_broadcast_arrays,
    broadcast_to as np_broadcast_to)

from ....exceptions import AstropyDeprecationWarning

__all__ = ['broadcast_arrays', 'broadcast_to', 'GE1P10']

__doctest_skip__ = ['*']


def GE1P10(module=np):
    return hasattr(module, 'broadcast_to')


def broadcast_arrays(*args, **kwargs):
    warnings.warn(
        'This function is deprecated, as it is available in all NumPy versions '
        'that this version of Astropy supports. You should use '
        'numpy.broadcast_arrays directly.', AstropyDeprecationWarning)
    return np_broadcast_arrays(*args, **kwargs)


def broadcast_to(*args, **kwargs):
    warnings.warn(
        'This function is deprecated, as it is available in all NumPy versions '
        'that this version of Astropy supports. You should use '
        'numpy.broadcast_to directly.', AstropyDeprecationWarning)
    return np_broadcast_to(*args, **kwargs)
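

# --- Added replacement example (not part of the original module) ---
# The recommended direct calls, which these shims merely forward to:
def _broadcast_example():
    view = np.broadcast_to(np.arange(3), (2, 3))   # read-only broadcast view
    a, b = np.broadcast_arrays(np.arange(3), np.ones((2, 1)))
    assert view.shape == a.shape == b.shape == (2, 3)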
55ee51821eadc3b962042785c9d517352f3614d5ac9c705164ee6a59d9332d5f
# ----------------------------------------------------------------------------- # ply: lex.py # # Copyright (C) 2001-2016 # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- __version__ = '3.9' __tabversion__ = '3.8' import re import sys import types import copy import os import inspect # This tuple contains known string types try: # Python 2.6 StringTypes = (types.StringType, types.UnicodeType) except AttributeError: # Python 3.0 StringTypes = (str, bytes) # This regular expression is used to match valid token names _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') # Exception thrown when invalid token encountered and no default error # handler is defined. class LexError(Exception): def __init__(self, message, s): self.args = (message,) self.text = s # Token class. This class is used to represent the tokens produced. class LexToken(object): def __str__(self): return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) def __repr__(self): return str(self) # This object is a stand-in for a logging object created by the # logging module. class PlyLogger(object): def __init__(self, f): self.f = f def critical(self, msg, *args, **kwargs): self.f.write((msg % args) + '\n') def warning(self, msg, *args, **kwargs): self.f.write('WARNING: ' + (msg % args) + '\n') def error(self, msg, *args, **kwargs): self.f.write('ERROR: ' + (msg % args) + '\n') info = critical debug = critical # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self, name): return self def __call__(self, *args, **kwargs): return self # ----------------------------------------------------------------------------- # === Lexing Engine === # # The following Lexer class implements the lexer runtime. 
There are only # a few public methods and attributes: # # input() - Store a new string in the lexer # token() - Get the next token # clone() - Clone the lexer # # lineno - Current line number # lexpos - Current position in the input string # ----------------------------------------------------------------------------- class Lexer: def __init__(self): self.lexre = None # Master regular expression. This is a list of # tuples (re, findex) where re is a compiled # regular expression and findex is a list # mapping regex group numbers to rules self.lexretext = None # Current regular expression strings self.lexstatere = {} # Dictionary mapping lexer states to master regexs self.lexstateretext = {} # Dictionary mapping lexer states to regex strings self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names self.lexstate = 'INITIAL' # Current lexer state self.lexstatestack = [] # Stack of lexer states self.lexstateinfo = None # State information self.lexstateignore = {} # Dictionary of ignored characters for each state self.lexstateerrorf = {} # Dictionary of error functions for each state self.lexstateeoff = {} # Dictionary of eof functions for each state self.lexreflags = 0 # Optional re compile flags self.lexdata = None # Actual input data (as a string) self.lexpos = 0 # Current position in input text self.lexlen = 0 # Length of the input text self.lexerrorf = None # Error rule (if any) self.lexeoff = None # EOF rule (if any) self.lextokens = None # List of valid tokens self.lexignore = '' # Ignored characters self.lexliterals = '' # Literal characters that can be passed through self.lexmodule = None # Module self.lineno = 1 # Current line number self.lexoptimize = False # Optimized mode def clone(self, object=None): c = copy.copy(self) # If the object parameter has been supplied, it means we are attaching the # lexer to a new object. In this case, we have to rebind all methods in # the lexstatere and lexstateerrorf tables. if object: newtab = {} for key, ritem in self.lexstatere.items(): newre = [] for cre, findex in ritem: newfindex = [] for f in findex: if not f or not f[0]: newfindex.append(f) continue newfindex.append((getattr(object, f[0].__name__), f[1])) newre.append((cre, newfindex)) newtab[key] = newre c.lexstatere = newtab c.lexstateerrorf = {} for key, ef in self.lexstateerrorf.items(): c.lexstateerrorf[key] = getattr(object, ef.__name__) c.lexmodule = object return c # ------------------------------------------------------------ # writetab() - Write lexer information to a table file # ------------------------------------------------------------ def writetab(self, lextab, outputdir=''): if isinstance(lextab, types.ModuleType): raise IOError("Won't overwrite existing lextab module") basetabmodule = lextab.split('.')[-1] filename = os.path.join(outputdir, basetabmodule) + '.py' with open(filename, 'w') as tf: tf.write('# %s.py. This file automatically created by PLY (version %s). 
Don\'t edit!\n' % (basetabmodule, __version__)) tf.write('_tabversion = %s\n' % repr(__tabversion__)) tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens))) tf.write('_lexreflags = %s\n' % repr(self.lexreflags)) tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) # Rewrite the lexstatere table, replacing function objects with function names tabre = {} for statename, lre in self.lexstatere.items(): titem = [] for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): titem.append((retext, _funcs_to_names(func, renames))) tabre[statename] = titem tf.write('_lexstatere = %s\n' % repr(tabre)) tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) taberr = {} for statename, ef in self.lexstateerrorf.items(): taberr[statename] = ef.__name__ if ef else None tf.write('_lexstateerrorf = %s\n' % repr(taberr)) tabeof = {} for statename, ef in self.lexstateeoff.items(): tabeof[statename] = ef.__name__ if ef else None tf.write('_lexstateeoff = %s\n' % repr(tabeof)) # ------------------------------------------------------------ # readtab() - Read lexer information from a tab file # ------------------------------------------------------------ def readtab(self, tabfile, fdict): if isinstance(tabfile, types.ModuleType): lextab = tabfile else: exec('import %s' % tabfile) lextab = sys.modules[tabfile] if getattr(lextab, '_tabversion', '0.0') != __tabversion__: raise ImportError('Inconsistent PLY version') self.lextokens = lextab._lextokens self.lexreflags = lextab._lexreflags self.lexliterals = lextab._lexliterals self.lextokens_all = self.lextokens | set(self.lexliterals) self.lexstateinfo = lextab._lexstateinfo self.lexstateignore = lextab._lexstateignore self.lexstatere = {} self.lexstateretext = {} for statename, lre in lextab._lexstatere.items(): titem = [] txtitem = [] for pat, func_name in lre: titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict))) self.lexstatere[statename] = titem self.lexstateretext[statename] = txtitem self.lexstateerrorf = {} for statename, ef in lextab._lexstateerrorf.items(): self.lexstateerrorf[statename] = fdict[ef] self.lexstateeoff = {} for statename, ef in lextab._lexstateeoff.items(): self.lexstateeoff[statename] = fdict[ef] self.begin('INITIAL') # ------------------------------------------------------------ # input() - Push a new string into the lexer # ------------------------------------------------------------ def input(self, s): # Pull off the first character to see if s looks like a string c = s[:1] if not isinstance(c, StringTypes): raise ValueError('Expected a string') self.lexdata = s self.lexpos = 0 self.lexlen = len(s) # ------------------------------------------------------------ # begin() - Changes the lexing state # ------------------------------------------------------------ def begin(self, state): if state not in self.lexstatere: raise ValueError('Undefined state') self.lexre = self.lexstatere[state] self.lexretext = self.lexstateretext[state] self.lexignore = self.lexstateignore.get(state, '') self.lexerrorf = self.lexstateerrorf.get(state, None) self.lexeoff = self.lexstateeoff.get(state, None) self.lexstate = state # ------------------------------------------------------------ # push_state() - Changes the lexing state and saves old on stack # ------------------------------------------------------------ def push_state(self, state): self.lexstatestack.append(self.lexstate) 
self.begin(state) # ------------------------------------------------------------ # pop_state() - Restores the previous state # ------------------------------------------------------------ def pop_state(self): self.begin(self.lexstatestack.pop()) # ------------------------------------------------------------ # current_state() - Returns the current lexing state # ------------------------------------------------------------ def current_state(self): return self.lexstate # ------------------------------------------------------------ # skip() - Skip ahead n characters # ------------------------------------------------------------ def skip(self, n): self.lexpos += n # ------------------------------------------------------------ # opttoken() - Return the next token from the Lexer # # Note: This function has been carefully implemented to be as fast # as possible. Don't make changes unless you really know what # you are doing # ------------------------------------------------------------ def token(self): # Make local copies of frequently referenced attributes lexpos = self.lexpos lexlen = self.lexlen lexignore = self.lexignore lexdata = self.lexdata while lexpos < lexlen: # This code provides some short-circuit code for whitespace, tabs, and other ignored characters if lexdata[lexpos] in lexignore: lexpos += 1 continue # Look for a regular expression match for lexre, lexindexfunc in self.lexre: m = lexre.match(lexdata, lexpos) if not m: continue # Create a token for return tok = LexToken() tok.value = m.group() tok.lineno = self.lineno tok.lexpos = lexpos i = m.lastindex func, tok.type = lexindexfunc[i] if not func: # If no token type was set, it's an ignored token if tok.type: self.lexpos = m.end() return tok else: lexpos = m.end() break lexpos = m.end() # If token is processed by a function, call it tok.lexer = self # Set additional attributes useful in token rules self.lexmatch = m self.lexpos = lexpos newtok = func(tok) # Every function must return a token, if nothing, we just move to next token if not newtok: lexpos = self.lexpos # This is here in case user has updated lexpos. lexignore = self.lexignore # This is here in case there was a state change break # Verify type of the token. If not in the token map, raise an error if not self.lexoptimize: if newtok.type not in self.lextokens_all: raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( func.__code__.co_filename, func.__code__.co_firstlineno, func.__name__, newtok.type), lexdata[lexpos:]) return newtok else: # No match, see if in literals if lexdata[lexpos] in self.lexliterals: tok = LexToken() tok.value = lexdata[lexpos] tok.lineno = self.lineno tok.type = tok.value tok.lexpos = lexpos self.lexpos = lexpos + 1 return tok # No match. Call t_error() if defined. if self.lexerrorf: tok = LexToken() tok.value = self.lexdata[lexpos:] tok.lineno = self.lineno tok.type = 'error' tok.lexer = self tok.lexpos = lexpos self.lexpos = lexpos newtok = self.lexerrorf(tok) if lexpos == self.lexpos: # Error method didn't change text position at all. This is an error. raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) lexpos = self.lexpos if not newtok: continue return newtok self.lexpos = lexpos raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) if self.lexeoff: tok = LexToken() tok.type = 'eof' tok.value = '' tok.lineno = self.lineno tok.lexpos = lexpos tok.lexer = self self.lexpos = lexpos newtok = self.lexeoff(tok) return newtok self.lexpos = lexpos + 1 if self.lexdata is None: raise RuntimeError('No input string given with input()') return None # Iterator interface def __iter__(self): return self def next(self): t = self.token() if t is None: raise StopIteration return t __next__ = next # ----------------------------------------------------------------------------- # ==== Lex Builder === # # The functions and classes below are used to collect lexing information # and build a Lexer object from it. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # _get_regex(func) # # Returns the regular expression assigned to a function either as a doc string # or as a .regex attribute attached by the @TOKEN decorator. # ----------------------------------------------------------------------------- def _get_regex(func): return getattr(func, 'regex', func.__doc__) # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. This is used to get the environment # associated with the yacc() call if none was provided. # ----------------------------------------------------------------------------- def get_caller_module_dict(levels): f = sys._getframe(levels) ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # _funcs_to_names() # # Given a list of regular expression functions, this converts it to a list # suitable for output to a table file # ----------------------------------------------------------------------------- def _funcs_to_names(funclist, namelist): result = [] for f, name in zip(funclist, namelist): if f and f[0]: result.append((name, f[1])) else: result.append(f) return result # ----------------------------------------------------------------------------- # _names_to_funcs() # # Given a list of regular expression function names, this converts it back to # functions. # ----------------------------------------------------------------------------- def _names_to_funcs(namelist, fdict): result = [] for n in namelist: if n and n[0]: result.append((fdict[n[0]], n[1])) else: result.append(n) return result # ----------------------------------------------------------------------------- # _form_master_re() # # This function takes a list of all of the regex components and attempts to # form the master regular expression. Given limitations in the Python re # module, it may be necessary to break the master regex into separate expressions. 
# ----------------------------------------------------------------------------- def _form_master_re(relist, reflags, ldict, toknames): if not relist: return [] regex = '|'.join(relist) try: lexre = re.compile(regex, re.VERBOSE | reflags) # Build the index to function map for the matching engine lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) lexindexnames = lexindexfunc[:] for f, i in lexre.groupindex.items(): handle = ldict.get(f, None) if type(handle) in (types.FunctionType, types.MethodType): lexindexfunc[i] = (handle, toknames[f]) lexindexnames[i] = f elif handle is not None: lexindexnames[i] = f if f.find('ignore_') > 0: lexindexfunc[i] = (None, None) else: lexindexfunc[i] = (None, toknames[f]) return [(lexre, lexindexfunc)], [regex], [lexindexnames] except Exception: m = int(len(relist)/2) if m == 0: m = 1 llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) return (llist+rlist), (lre+rre), (lnames+rnames) # ----------------------------------------------------------------------------- # def _statetoken(s,names) # # Given a declaration name s of the form "t_" and a dictionary whose keys are # state names, this function returns a tuple (states,tokenname) where states # is a tuple of state names and tokenname is the name of the token. For example, # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') # ----------------------------------------------------------------------------- def _statetoken(s, names): nonstate = 1 parts = s.split('_') for i, part in enumerate(parts[1:], 1): if part not in names and part != 'ANY': break if i > 1: states = tuple(parts[1:i]) else: states = ('INITIAL',) if 'ANY' in states: states = tuple(names) tokenname = '_'.join(parts[i:]) return (states, tokenname) # ----------------------------------------------------------------------------- # LexerReflect() # # This class represents information needed to build a lexer as extracted from a # user's input file. 
# ----------------------------------------------------------------------------- class LexerReflect(object): def __init__(self, ldict, log=None, reflags=0): self.ldict = ldict self.error_func = None self.tokens = [] self.reflags = reflags self.stateinfo = {'INITIAL': 'inclusive'} self.modules = set() self.error = False self.log = PlyLogger(sys.stderr) if log is None else log # Get all of the basic information def get_all(self): self.get_tokens() self.get_literals() self.get_states() self.get_rules() # Validate all of the information def validate_all(self): self.validate_tokens() self.validate_literals() self.validate_rules() return self.error # Get the tokens map def get_tokens(self): tokens = self.ldict.get('tokens', None) if not tokens: self.log.error('No token list is defined') self.error = True return if not isinstance(tokens, (list, tuple)): self.log.error('tokens must be a list or tuple') self.error = True return if not tokens: self.log.error('tokens is empty') self.error = True return self.tokens = tokens # Validate the tokens def validate_tokens(self): terminals = {} for n in self.tokens: if not _is_identifier.match(n): self.log.error("Bad token name '%s'", n) self.error = True if n in terminals: self.log.warning("Token '%s' multiply defined", n) terminals[n] = 1 # Get the literals specifier def get_literals(self): self.literals = self.ldict.get('literals', '') if not self.literals: self.literals = '' # Validate literals def validate_literals(self): try: for c in self.literals: if not isinstance(c, StringTypes) or len(c) > 1: self.log.error('Invalid literal %s. Must be a single character', repr(c)) self.error = True except TypeError: self.log.error('Invalid literals specification. literals must be a sequence of characters') self.error = True def get_states(self): self.states = self.ldict.get('states', None) # Build statemap if self.states: if not isinstance(self.states, (tuple, list)): self.log.error('states must be defined as a tuple or list') self.error = True else: for s in self.states: if not isinstance(s, tuple) or len(s) != 2: self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) self.error = True continue name, statetype = s if not isinstance(name, StringTypes): self.log.error('State name %s must be a string', repr(name)) self.error = True continue if not (statetype == 'inclusive' or statetype == 'exclusive'): self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) self.error = True continue if name in self.stateinfo: self.log.error("State '%s' already defined", name) self.error = True continue self.stateinfo[name] = statetype # Get all of the symbols with a t_ prefix and sort them into various # categories (functions, strings, error functions, and ignore characters) def get_rules(self): tsymbols = [f for f in self.ldict if f[:2] == 't_'] # Now build up a list of functions and a list of strings self.toknames = {} # Mapping of symbols to token names self.funcsym = {} # Symbols defined as functions self.strsym = {} # Symbols defined as strings self.ignore = {} # Ignore strings by state self.errorf = {} # Error functions by state self.eoff = {} # EOF functions by state for s in self.stateinfo: self.funcsym[s] = [] self.strsym[s] = [] if len(tsymbols) == 0: self.log.error('No rules of the form t_rulename are defined') self.error = True return for f in tsymbols: t = self.ldict[f] states, tokname = _statetoken(f, self.stateinfo) self.toknames[f] = tokname if hasattr(t, '__call__'): if tokname == 'error': for s in states: self.errorf[s] = t elif tokname == 'eof': for s in states: self.eoff[s] = t elif tokname == 'ignore': line = t.__code__.co_firstlineno file = t.__code__.co_filename self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) self.error = True else: for s in states: self.funcsym[s].append((f, t)) elif isinstance(t, StringTypes): if tokname == 'ignore': for s in states: self.ignore[s] = t if '\\' in t: self.log.warning("%s contains a literal backslash '\\'", f) elif tokname == 'error': self.log.error("Rule '%s' must be defined as a function", f) self.error = True else: for s in states: self.strsym[s].append((f, t)) else: self.log.error('%s not defined as a function or string', f) self.error = True # Sort the functions by line number for f in self.funcsym.values(): f.sort(key=lambda x: x[1].__code__.co_firstlineno) # Sort the strings by regular expression length for s in self.strsym.values(): s.sort(key=lambda x: len(x[1]), reverse=True) # Validate all of the t_rules collected def validate_rules(self): for state in self.stateinfo: # Validate all rules defined by functions for fname, f in self.funcsym[state]: line = f.__code__.co_firstlineno file = f.__code__.co_filename module = inspect.getmodule(f) self.modules.add(module) tokname = self.toknames[fname] if isinstance(f, types.MethodType): reqargs = 2 else: reqargs = 1 nargs = f.__code__.co_argcount if nargs > reqargs: self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) self.error = True continue if nargs < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) self.error = True continue if not _get_regex(f): self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) self.error = True continue try: c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags) if c.match(''): self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) self.error = True except re.error as e: self.log.error("%s:%d: Invalid regular expression for rule '%s'. 
%s", file, line, f.__name__, e) if '#' in _get_regex(f): self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) self.error = True # Validate all rules defined by strings for name, r in self.strsym[state]: tokname = self.toknames[name] if tokname == 'error': self.log.error("Rule '%s' must be defined as a function", name) self.error = True continue if tokname not in self.tokens and tokname.find('ignore_') < 0: self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) self.error = True continue try: c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags) if (c.match('')): self.log.error("Regular expression for rule '%s' matches empty string", name) self.error = True except re.error as e: self.log.error("Invalid regular expression for rule '%s'. %s", name, e) if '#' in r: self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) self.error = True if not self.funcsym[state] and not self.strsym[state]: self.log.error("No rules defined for state '%s'", state) self.error = True # Validate the error function efunc = self.errorf.get(state, None) if efunc: f = efunc line = f.__code__.co_firstlineno file = f.__code__.co_filename module = inspect.getmodule(f) self.modules.add(module) if isinstance(f, types.MethodType): reqargs = 2 else: reqargs = 1 nargs = f.__code__.co_argcount if nargs > reqargs: self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) self.error = True if nargs < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) self.error = True for module in self.modules: self.validate_module(module) # ----------------------------------------------------------------------------- # validate_module() # # This checks to see if there are duplicated t_rulename() functions or strings # in the parser input file. This is done using a simple regular expression # match on each line in the source code of the given module. # ----------------------------------------------------------------------------- def validate_module(self, module): try: lines, linen = inspect.getsourcelines(module) except IOError: return fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') counthash = {} linen += 1 for line in lines: m = fre.match(line) if not m: m = sre.match(line) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: filename = inspect.getsourcefile(module) self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) self.error = True linen += 1 # ----------------------------------------------------------------------------- # lex(module) # # Build all of the regular expression rules from definitions in the supplied module # ----------------------------------------------------------------------------- def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None): if lextab is None: lextab = 'lextab' global lexer ldict = None stateinfo = {'INITIAL': 'inclusive'} lexobj = Lexer() lexobj.lexoptimize = optimize global token, input if errorlog is None: errorlog = PlyLogger(sys.stderr) if debug: if debuglog is None: debuglog = PlyLogger(sys.stderr) # Get the module dictionary used for the lexer if object: module = object # Get the module dictionary used for the parser if module: _items = [(k, getattr(module, k)) for k in dir(module)] ldict = dict(_items) # If no __file__ attribute is available, try to obtain it from the __module__ instead if '__file__' not in ldict: ldict['__file__'] = sys.modules[ldict['__module__']].__file__ else: ldict = get_caller_module_dict(2) # Determine if the module is package of a package or not. # If so, fix the tabmodule setting so that tables load correctly pkg = ldict.get('__package__') if pkg and isinstance(lextab, str): if '.' not in lextab: lextab = pkg + '.' + lextab # Collect parser information from the dictionary linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) linfo.get_all() if not optimize: if linfo.validate_all(): raise SyntaxError("Can't build lexer") if optimize and lextab: try: lexobj.readtab(lextab, ldict) token = lexobj.token input = lexobj.input lexer = lexobj return lexobj except ImportError: pass # Dump some basic debugging information if debug: debuglog.info('lex: tokens = %r', linfo.tokens) debuglog.info('lex: literals = %r', linfo.literals) debuglog.info('lex: states = %r', linfo.stateinfo) # Build a dictionary of valid token names lexobj.lextokens = set() for n in linfo.tokens: lexobj.lextokens.add(n) # Get literals specification if isinstance(linfo.literals, (list, tuple)): lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) else: lexobj.lexliterals = linfo.literals lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) # Get the stateinfo dictionary stateinfo = linfo.stateinfo regexs = {} # Build the master regular expressions for state in stateinfo: regex_list = [] # Add rules defined by functions first for fname, f in linfo.funcsym[state]: line = f.__code__.co_firstlineno file = f.__code__.co_filename regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) if debug: debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) # Now add all of the simple rules for name, r in linfo.strsym[state]: regex_list.append('(?P<%s>%s)' % (name, r)) if debug: debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) regexs[state] = regex_list # Build the master regular expressions if debug: debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') for state in regexs: lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) lexobj.lexstatere[state] = lexre lexobj.lexstateretext[state] = re_text lexobj.lexstaterenames[state] = re_names if debug: for i, text in enumerate(re_text): debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) # For inclusive states, we need to add the regular expressions from 
the INITIAL state for state, stype in stateinfo.items(): if state != 'INITIAL' and stype == 'inclusive': lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) lexobj.lexstateinfo = stateinfo lexobj.lexre = lexobj.lexstatere['INITIAL'] lexobj.lexretext = lexobj.lexstateretext['INITIAL'] lexobj.lexreflags = reflags # Set up ignore variables lexobj.lexstateignore = linfo.ignore lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') # Set up error functions lexobj.lexstateerrorf = linfo.errorf lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) if not lexobj.lexerrorf: errorlog.warning('No t_error rule is defined') # Set up eof functions lexobj.lexstateeoff = linfo.eoff lexobj.lexeoff = linfo.eoff.get('INITIAL', None) # Check state information for ignore and error rules for s, stype in stateinfo.items(): if stype == 'exclusive': if s not in linfo.errorf: errorlog.warning("No error rule is defined for exclusive state '%s'", s) if s not in linfo.ignore and lexobj.lexignore: errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) elif stype == 'inclusive': if s not in linfo.errorf: linfo.errorf[s] = linfo.errorf.get('INITIAL', None) if s not in linfo.ignore: linfo.ignore[s] = linfo.ignore.get('INITIAL', '') # Create global versions of the token() and input() functions token = lexobj.token input = lexobj.input lexer = lexobj # If in optimize mode, we write the lextab if lextab and optimize: if outputdir is None: # If no output directory is set, the location of the output files # is determined according to the following rules: # - If lextab specifies a package, files go into that package directory # - Otherwise, files go in the same directory as the specifying module if isinstance(lextab, types.ModuleType): srcfile = lextab.__file__ else: if '.' not in lextab: srcfile = ldict['__file__'] else: parts = lextab.split('.') pkgname = '.'.join(parts[:-1]) exec('import %s' % pkgname) srcfile = getattr(sys.modules[pkgname], '__file__', '') outputdir = os.path.dirname(srcfile) try: lexobj.writetab(lextab, outputdir) except IOError as e: errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e)) return lexobj # ----------------------------------------------------------------------------- # runmain() # # This runs the lexer as a main program # ----------------------------------------------------------------------------- def runmain(lexer=None, data=None): if not data: try: filename = sys.argv[1] f = open(filename) data = f.read() f.close() except IndexError: sys.stdout.write('Reading from standard input (type EOF to end):\n') data = sys.stdin.read() if lexer: _input = lexer.input else: _input = input _input(data) if lexer: _token = lexer.token else: _token = token while True: tok = _token() if not tok: break sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) # ----------------------------------------------------------------------------- # @TOKEN(regex) # # This decorator function can be used to set the regex expression on a function # when its docstring might need to be set in an alternative way # ----------------------------------------------------------------------------- def TOKEN(r): def set_regex(f): if hasattr(r, '__call__'): f.regex = _get_regex(r) else: f.regex = r return f return set_regex # Alternative spelling of the TOKEN decorator Token = TOKEN
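
# -----------------------------------------------------------------------------
# Added usage sketch (commentary only; not part of PLY itself).  A minimal
# lexer module defines a ``tokens`` tuple plus ``t_`` rules and then calls
# lex(); the names below are illustrative.
#
#     tokens = ('NUMBER', 'PLUS')
#
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex()
#     lexer.input('1 + 2')
#     for tok in lexer:
#         print(tok.type, tok.value)
# -----------------------------------------------------------------------------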
ad5a69f42f3c16a0f8a8253c8f2a8ab3d076094b163a29231e3bf8ee5aedfd2f
# ----------------------------------------------------------------------------- # cpp.py # # Author: David Beazley (http://www.dabeaz.com) # Copyright (C) 2007 # All rights reserved # # This module implements an ANSI-C style lexical preprocessor for PLY. # ----------------------------------------------------------------------------- from __future__ import generators import sys # Some Python 3 compatibility shims if sys.version_info.major < 3: STRING_TYPES = (str, unicode) else: STRING_TYPES = str xrange = range # ----------------------------------------------------------------------------- # Default preprocessor lexer definitions. These tokens are enough to get # a basic preprocessor working. Other modules may import these if they want # ----------------------------------------------------------------------------- tokens = ( 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND' ) literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\"" # Whitespace def t_CPP_WS(t): r'\s+' t.lexer.lineno += t.value.count("\n") return t t_CPP_POUND = r'\#' t_CPP_DPOUND = r'\#\#' # Identifier t_CPP_ID = r'[A-Za-z_][\w_]*' # Integer literal def CPP_INTEGER(t): r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)' return t t_CPP_INTEGER = CPP_INTEGER # Floating literal t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' # String literal def t_CPP_STRING(t): r'\"([^\\\n]|(\\(.|\n)))*?\"' t.lexer.lineno += t.value.count("\n") return t # Character constant 'c' or L'c' def t_CPP_CHAR(t): r'(L)?\'([^\\\n]|(\\(.|\n)))*?\'' t.lexer.lineno += t.value.count("\n") return t # Comment def t_CPP_COMMENT1(t): r'(/\*(.|\n)*?\*/)' ncr = t.value.count("\n") t.lexer.lineno += ncr # replace with one space or a number of '\n' t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' ' return t # Line comment def t_CPP_COMMENT2(t): r'(//.*?(\n|$))' # replace with '/n' t.type = 'CPP_WS'; t.value = '\n' def t_error(t): t.type = t.value[0] t.value = t.value[0] t.lexer.skip(1) return t import re import copy import time import os.path # ----------------------------------------------------------------------------- # trigraph() # # Given an input string, this function replaces all trigraph sequences. # The following mapping is used: # # ??= # # ??/ \ # ??' ^ # ??( [ # ??) ] # ??! 
| # ??< { # ??> } # ??- ~ # ----------------------------------------------------------------------------- _trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''') _trigraph_rep = { '=':'#', '/':'\\', "'":'^', '(':'[', ')':']', '!':'|', '<':'{', '>':'}', '-':'~' } def trigraph(input): return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input) # ------------------------------------------------------------------ # Macro object # # This object holds information about preprocessor macros # # .name - Macro name (string) # .value - Macro value (a list of tokens) # .arglist - List of argument names # .variadic - Boolean indicating whether or not variadic macro # .vararg - Name of the variadic parameter # # When a macro is created, the macro replacement token sequence is # pre-scanned and used to create patch lists that are later used # during macro expansion # ------------------------------------------------------------------ class Macro(object): def __init__(self,name,value,arglist=None,variadic=False): self.name = name self.value = value self.arglist = arglist self.variadic = variadic if variadic: self.vararg = arglist[-1] self.source = None # ------------------------------------------------------------------ # Preprocessor object # # Object representing a preprocessor. Contains macro definitions, # include directories, and other information # ------------------------------------------------------------------ class Preprocessor(object): def __init__(self,lexer=None): if lexer is None: lexer = lex.lexer self.lexer = lexer self.macros = { } self.path = [] self.temp_path = [] # Probe the lexer for selected tokens self.lexprobe() tm = time.localtime() self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm)) self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm)) self.parser = None # ----------------------------------------------------------------------------- # tokenize() # # Utility function. Given a string of text, tokenize into a list of tokens # ----------------------------------------------------------------------------- def tokenize(self,text): tokens = [] self.lexer.input(text) while True: tok = self.lexer.token() if not tok: break tokens.append(tok) return tokens # --------------------------------------------------------------------- # error() # # Report a preprocessor error/warning of some kind # ---------------------------------------------------------------------- def error(self,file,line,msg): print("%s:%d %s" % (file,line,msg)) # ---------------------------------------------------------------------- # lexprobe() # # This method probes the preprocessor lexer object to discover # the token types of symbols that are important to the preprocessor. # If this works right, the preprocessor will simply "work" # with any suitable lexer regardless of how tokens have been named. 
# ---------------------------------------------------------------------- def lexprobe(self): # Determine the token type for identifiers self.lexer.input("identifier") tok = self.lexer.token() if not tok or tok.value != "identifier": print("Couldn't determine identifier type") else: self.t_ID = tok.type # Determine the token type for integers self.lexer.input("12345") tok = self.lexer.token() if not tok or int(tok.value) != 12345: print("Couldn't determine integer type") else: self.t_INTEGER = tok.type self.t_INTEGER_TYPE = type(tok.value) # Determine the token type for strings enclosed in double quotes self.lexer.input("\"filename\"") tok = self.lexer.token() if not tok or tok.value != "\"filename\"": print("Couldn't determine string type") else: self.t_STRING = tok.type # Determine the token type for whitespace--if any self.lexer.input(" ") tok = self.lexer.token() if not tok or tok.value != " ": self.t_SPACE = None else: self.t_SPACE = tok.type # Determine the token type for newlines self.lexer.input("\n") tok = self.lexer.token() if not tok or tok.value != "\n": self.t_NEWLINE = None print("Couldn't determine token for newlines") else: self.t_NEWLINE = tok.type self.t_WS = (self.t_SPACE, self.t_NEWLINE) # Check for other characters used by the preprocessor chars = [ '<','>','#','##','\\','(',')',',','.'] for c in chars: self.lexer.input(c) tok = self.lexer.token() if not tok or tok.value != c: print("Unable to lex '%s' required for preprocessor" % c) # ---------------------------------------------------------------------- # add_path() # # Adds a search path to the preprocessor. # ---------------------------------------------------------------------- def add_path(self,path): self.path.append(path) # ---------------------------------------------------------------------- # group_lines() # # Given an input string, this function splits it into lines. Trailing whitespace # is removed. Any line ending with \ is grouped with the next line. This # function forms the lowest level of the preprocessor---grouping into text into # a line-by-line format. # ---------------------------------------------------------------------- def group_lines(self,input): lex = self.lexer.clone() lines = [x.rstrip() for x in input.splitlines()] for i in xrange(len(lines)): j = i+1 while lines[i].endswith('\\') and (j < len(lines)): lines[i] = lines[i][:-1]+lines[j] lines[j] = "" j += 1 input = "\n".join(lines) lex.input(input) lex.lineno = 1 current_line = [] while True: tok = lex.token() if not tok: break current_line.append(tok) if tok.type in self.t_WS and '\n' in tok.value: yield current_line current_line = [] if current_line: yield current_line # ---------------------------------------------------------------------- # tokenstrip() # # Remove leading/trailing whitespace tokens from a token list # ---------------------------------------------------------------------- def tokenstrip(self,tokens): i = 0 while i < len(tokens) and tokens[i].type in self.t_WS: i += 1 del tokens[:i] i = len(tokens)-1 while i >= 0 and tokens[i].type in self.t_WS: i -= 1 del tokens[i+1:] return tokens # ---------------------------------------------------------------------- # collect_args() # # Collects comma separated arguments from a list of tokens. The arguments # must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions) # where tokencount is the number of tokens consumed, args is a list of arguments, # and positions is a list of integers containing the starting index of each # argument. 
Each argument is represented by a list of tokens. # # When collecting arguments, leading and trailing whitespace is removed # from each argument. # # This function properly handles nested parenthesis and commas---these do not # define new arguments. # ---------------------------------------------------------------------- def collect_args(self,tokenlist): args = [] positions = [] current_arg = [] nesting = 1 tokenlen = len(tokenlist) # Search for the opening '('. i = 0 while (i < tokenlen) and (tokenlist[i].type in self.t_WS): i += 1 if (i < tokenlen) and (tokenlist[i].value == '('): positions.append(i+1) else: self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments") return 0, [], [] i += 1 while i < tokenlen: t = tokenlist[i] if t.value == '(': current_arg.append(t) nesting += 1 elif t.value == ')': nesting -= 1 if nesting == 0: if current_arg: args.append(self.tokenstrip(current_arg)) positions.append(i) return i+1,args,positions current_arg.append(t) elif t.value == ',' and nesting == 1: args.append(self.tokenstrip(current_arg)) positions.append(i+1) current_arg = [] else: current_arg.append(t) i += 1 # Missing end argument self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments") return 0, [],[] # ---------------------------------------------------------------------- # macro_prescan() # # Examine the macro value (token sequence) and identify patch points # This is used to speed up macro expansion later on---we'll know # right away where to apply patches to the value to form the expansion # ---------------------------------------------------------------------- def macro_prescan(self,macro): macro.patch = [] # Standard macro arguments macro.str_patch = [] # String conversion expansion macro.var_comma_patch = [] # Variadic macro comma patch i = 0 while i < len(macro.value): if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist: argnum = macro.arglist.index(macro.value[i].value) # Conversion of argument to a string if i > 0 and macro.value[i-1].value == '#': macro.value[i] = copy.copy(macro.value[i]) macro.value[i].type = self.t_STRING del macro.value[i-1] macro.str_patch.append((argnum,i-1)) continue # Concatenation elif (i > 0 and macro.value[i-1].value == '##'): macro.patch.append(('c',argnum,i-1)) del macro.value[i-1] continue elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'): macro.patch.append(('c',argnum,i)) i += 1 continue # Standard expansion else: macro.patch.append(('e',argnum,i)) elif macro.value[i].value == '##': if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \ (macro.value[i+1].value == macro.vararg): macro.var_comma_patch.append(i-1) i += 1 macro.patch.sort(key=lambda x: x[2],reverse=True) # ---------------------------------------------------------------------- # macro_expand_args() # # Given a Macro and list of arguments (each a token list), this method # returns an expanded version of a macro. The return value is a token sequence # representing the replacement macro tokens # ---------------------------------------------------------------------- def macro_expand_args(self,macro,args): # Make a copy of the macro token sequence rep = [copy.copy(_x) for _x in macro.value] # Make string expansion patches. 
These do not alter the length of the replacement sequence str_expansion = {} for argnum, i in macro.str_patch: if argnum not in str_expansion: str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\") rep[i] = copy.copy(rep[i]) rep[i].value = str_expansion[argnum] # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid comma_patch = False if macro.variadic and not args[-1]: for i in macro.var_comma_patch: rep[i] = None comma_patch = True # Make all other patches. The order of these matters. It is assumed that the patch list # has been sorted in reverse order of patch location since replacements will cause the # size of the replacement sequence to expand from the patch point. expanded = { } for ptype, argnum, i in macro.patch: # Concatenation. Argument is left unexpanded if ptype == 'c': rep[i:i+1] = args[argnum] # Normal expansion. Argument is macro expanded first elif ptype == 'e': if argnum not in expanded: expanded[argnum] = self.expand_macros(args[argnum]) rep[i:i+1] = expanded[argnum] # Get rid of removed comma if necessary if comma_patch: rep = [_i for _i in rep if _i] return rep # ---------------------------------------------------------------------- # expand_macros() # # Given a list of tokens, this function performs macro expansion. # The expanded argument is a dictionary that contains macros already # expanded. This is used to prevent infinite recursion. # ---------------------------------------------------------------------- def expand_macros(self,tokens,expanded=None): if expanded is None: expanded = {} i = 0 while i < len(tokens): t = tokens[i] if t.type == self.t_ID: if t.value in self.macros and t.value not in expanded: # Yes, we found a macro match expanded[t.value] = True m = self.macros[t.value] if not m.arglist: # A simple macro ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded) for e in ex: e.lineno = t.lineno tokens[i:i+1] = ex i += len(ex) else: # A macro with arguments j = i + 1 while j < len(tokens) and tokens[j].type in self.t_WS: j += 1 if tokens[j].value == '(': tokcount,args,positions = self.collect_args(tokens[j:]) if not m.variadic and len(args) != len(m.arglist): self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist))) i = j + tokcount elif m.variadic and len(args) < len(m.arglist)-1: if len(m.arglist) > 2: self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1)) else: self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1)) i = j + tokcount else: if m.variadic: if len(args) == len(m.arglist)-1: args.append([]) else: args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1] del args[len(m.arglist):] # Get macro replacement text rep = self.macro_expand_args(m,args) rep = self.expand_macros(rep,expanded) for r in rep: r.lineno = t.lineno tokens[i:j+tokcount] = rep i += len(rep) del expanded[t.value] continue elif t.value == '__LINE__': t.type = self.t_INTEGER t.value = self.t_INTEGER_TYPE(t.lineno) i += 1 return tokens # ---------------------------------------------------------------------- # evalexpr() # # Evaluate an expression token sequence for the purposes of evaluating # integral expressions. 
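    # For example, for a directive such as '#if defined(FOO) && FOO > 2', the
    # 'defined(FOO)' test is first rewritten to a 1 or 0 integer token, the
    # remaining macros are expanded, '&&', '||', and '!' are translated to the
    # Python operators 'and', 'or', and 'not', and the resulting string is
    # handed to eval() to produce the directive's value.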
# ---------------------------------------------------------------------- def evalexpr(self,tokens): # tokens = tokenize(line) # Search for defined macros i = 0 while i < len(tokens): if tokens[i].type == self.t_ID and tokens[i].value == 'defined': j = i + 1 needparen = False result = "0L" while j < len(tokens): if tokens[j].type in self.t_WS: j += 1 continue elif tokens[j].type == self.t_ID: if tokens[j].value in self.macros: result = "1L" else: result = "0L" if not needparen: break elif tokens[j].value == '(': needparen = True elif tokens[j].value == ')': break else: self.error(self.source,tokens[i].lineno,"Malformed defined()") j += 1 tokens[i].type = self.t_INTEGER tokens[i].value = self.t_INTEGER_TYPE(result) del tokens[i+1:j+1] i += 1 tokens = self.expand_macros(tokens) for i,t in enumerate(tokens): if t.type == self.t_ID: tokens[i] = copy.copy(t) tokens[i].type = self.t_INTEGER tokens[i].value = self.t_INTEGER_TYPE("0L") elif t.type == self.t_INTEGER: tokens[i] = copy.copy(t) # Strip off any trailing suffixes tokens[i].value = str(tokens[i].value) while tokens[i].value[-1] not in "0123456789abcdefABCDEF": tokens[i].value = tokens[i].value[:-1] expr = "".join([str(x.value) for x in tokens]) expr = expr.replace("&&"," and ") expr = expr.replace("||"," or ") expr = expr.replace("!"," not ") try: result = eval(expr) except Exception: self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression") result = 0 return result # ---------------------------------------------------------------------- # parsegen() # # Parse an input string/ # ---------------------------------------------------------------------- def parsegen(self,input,source=None): # Replace trigraph sequences t = trigraph(input) lines = self.group_lines(t) if not source: source = "" self.define("__FILE__ \"%s\"" % source) self.source = source chunk = [] enable = True iftrigger = False ifstack = [] for x in lines: for i,tok in enumerate(x): if tok.type not in self.t_WS: break if tok.value == '#': # Preprocessor directive # insert necessary whitespace instead of eaten tokens for tok in x: if tok.type in self.t_WS and '\n' in tok.value: chunk.append(tok) dirtokens = self.tokenstrip(x[i+1:]) if dirtokens: name = dirtokens[0].value args = self.tokenstrip(dirtokens[1:]) else: name = "" args = [] if name == 'define': if enable: for tok in self.expand_macros(chunk): yield tok chunk = [] self.define(args) elif name == 'include': if enable: for tok in self.expand_macros(chunk): yield tok chunk = [] oldfile = self.macros['__FILE__'] for tok in self.include(args): yield tok self.macros['__FILE__'] = oldfile self.source = source elif name == 'undef': if enable: for tok in self.expand_macros(chunk): yield tok chunk = [] self.undef(args) elif name == 'ifdef': ifstack.append((enable,iftrigger)) if enable: if not args[0].value in self.macros: enable = False iftrigger = False else: iftrigger = True elif name == 'ifndef': ifstack.append((enable,iftrigger)) if enable: if args[0].value in self.macros: enable = False iftrigger = False else: iftrigger = True elif name == 'if': ifstack.append((enable,iftrigger)) if enable: result = self.evalexpr(args) if not result: enable = False iftrigger = False else: iftrigger = True elif name == 'elif': if ifstack: if ifstack[-1][0]: # We only pay attention if outer "if" allows this if enable: # If already true, we flip enable False enable = False elif not iftrigger: # If False, but not triggered yet, we'll check expression result = self.evalexpr(args) if result: enable = True iftrigger = True else: 
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif") elif name == 'else': if ifstack: if ifstack[-1][0]: if enable: enable = False elif not iftrigger: enable = True iftrigger = True else: self.error(self.source,dirtokens[0].lineno,"Misplaced #else") elif name == 'endif': if ifstack: enable,iftrigger = ifstack.pop() else: self.error(self.source,dirtokens[0].lineno,"Misplaced #endif") else: # Unknown preprocessor directive pass else: # Normal text if enable: chunk.extend(x) for tok in self.expand_macros(chunk): yield tok chunk = [] # ---------------------------------------------------------------------- # include() # # Implementation of file-inclusion # ---------------------------------------------------------------------- def include(self,tokens): # Try to extract the filename and then process an include file if not tokens: return if tokens: if tokens[0].value != '<' and tokens[0].type != self.t_STRING: tokens = self.expand_macros(tokens) if tokens[0].value == '<': # Include <...> i = 1 while i < len(tokens): if tokens[i].value == '>': break i += 1 else: print("Malformed #include <...>") return filename = "".join([x.value for x in tokens[1:i]]) path = self.path + [""] + self.temp_path elif tokens[0].type == self.t_STRING: filename = tokens[0].value[1:-1] path = self.temp_path + [""] + self.path else: print("Malformed #include statement") return for p in path: iname = os.path.join(p,filename) try: data = open(iname,"r").read() dname = os.path.dirname(iname) if dname: self.temp_path.insert(0,dname) for tok in self.parsegen(data,filename): yield tok if dname: del self.temp_path[0] break except IOError: pass else: print("Couldn't find '%s'" % filename) # ---------------------------------------------------------------------- # define() # # Define a new macro # ---------------------------------------------------------------------- def define(self,tokens): if isinstance(tokens,STRING_TYPES): tokens = self.tokenize(tokens) linetok = tokens try: name = linetok[0] if len(linetok) > 1: mtype = linetok[1] else: mtype = None if not mtype: m = Macro(name.value,[]) self.macros[name.value] = m elif mtype.type in self.t_WS: # A normal macro m = Macro(name.value,self.tokenstrip(linetok[2:])) self.macros[name.value] = m elif mtype.value == '(': # A macro with arguments tokcount, args, positions = self.collect_args(linetok[1:]) variadic = False for a in args: if variadic: print("No more arguments may follow a variadic argument") break astr = "".join([str(_i.value) for _i in a]) if astr == "...": variadic = True a[0].type = self.t_ID a[0].value = '__VA_ARGS__' variadic = True del a[1:] continue elif astr[-3:] == "..." and a[0].type == self.t_ID: variadic = True del a[1:] # If, for some reason, "." 
is part of the identifier, strip off the name for the purposes # of macro expansion if a[0].value[-3:] == '...': a[0].value = a[0].value[:-3] continue if len(a) > 1 or a[0].type != self.t_ID: print("Invalid macro argument") break else: mvalue = self.tokenstrip(linetok[1+tokcount:]) i = 0 while i < len(mvalue): if i+1 < len(mvalue): if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##': del mvalue[i] continue elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS: del mvalue[i+1] i += 1 m = Macro(name.value,mvalue,[x[0].value for x in args],variadic) self.macro_prescan(m) self.macros[name.value] = m else: print("Bad macro definition") except LookupError: print("Bad macro definition") # ---------------------------------------------------------------------- # undef() # # Undefine a macro # ---------------------------------------------------------------------- def undef(self,tokens): id = tokens[0].value try: del self.macros[id] except LookupError: pass # ---------------------------------------------------------------------- # parse() # # Parse input text. # ---------------------------------------------------------------------- def parse(self,input,source=None,ignore={}): self.ignore = ignore self.parser = self.parsegen(input,source) # ---------------------------------------------------------------------- # token() # # Method to return individual tokens # ---------------------------------------------------------------------- def token(self): try: while True: tok = next(self.parser) if tok.type not in self.ignore: return tok except StopIteration: self.parser = None return None if __name__ == '__main__': import ply.lex as lex lexer = lex.lex() # Run a preprocessor import sys f = open(sys.argv[1]) input = f.read() p = Preprocessor(lexer) p.parse(input,sys.argv[1]) while True: tok = p.token() if not tok: break print(p.source, tok)
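# -----------------------------------------------------------------------------
# Illustrative sketch: a small helper showing how the Preprocessor class above
# is typically driven from an in-memory string rather than a file.  The helper
# name and the SQUARE macro are made up for the example; the lexer is built
# from this module's token rules, exactly as in the __main__ block above.
# -----------------------------------------------------------------------------
def _example_preprocess(text):
    import ply.lex as lex
    lexer = lex.lex()                    # build a lexer from the rules above
    p = Preprocessor(lexer)
    p.define("SQUARE(x) ((x)*(x))")      # define a function-like macro
    p.parse(text, "<string>")
    pieces = []
    while True:
        tok = p.token()
        if not tok:
            break
        pieces.append(tok.value)
    return "".join(pieces)               # the preprocessed text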
ab8b3ce90c11b1845adb42fdb9e4b17e1fa13e28697ed0630cebd86b6fd24b66
# PLY package
# Author: David Beazley ([email protected])

__version__ = '3.9'
__all__ = ['lex','yacc']
a323221afd69924cda39af2cd98c654f091b0aee12e60a03275ce18a98da3679
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2016
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross
# between Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
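#
# As a quick illustration of the rule style this module expects (the token
# names, the tiny expression grammar, and the 'mylexer'/'data' objects below
# are invented for the example), a grammar rule is an ordinary Python function
# whose docstring holds the BNF and whose body assigns the rule's value:
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()             # builds the LR tables from the p_* rules
#     result = parser.parse(data, lexer=mylexer)
#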
# ---------------------------------------------------------------------------- import re import types import sys import os.path import inspect import base64 import warnings __version__ = '3.9' __tabversion__ = '3.8' #----------------------------------------------------------------------------- # === User configurable parameters === # # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- yaccdebug = True # Debugging mode. If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode yaccdevel = False # Set to True if developing yacc. This turns off optimized # implementations of certain functions. resultlimit = 40 # Size limit of results when running in debug mode. pickle_protocol = 0 # Protocol to use when writing pickle files # String type-checking compatibility if sys.version_info[0] < 3: string_types = basestring else: string_types = str MAXINT = sys.maxsize # This object is a stand-in for a logging object created by the # logging module. PLY will use this by default to create things # such as the parser.out file. If a user wants more detailed # information, they can create their own logging object and pass # it into PLY. class PlyLogger(object): def __init__(self, f): self.f = f def debug(self, msg, *args, **kwargs): self.f.write((msg % args) + '\n') info = debug def warning(self, msg, *args, **kwargs): self.f.write('WARNING: ' + (msg % args) + '\n') def error(self, msg, *args, **kwargs): self.f.write('ERROR: ' + (msg % args) + '\n') critical = debug # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self, name): return self def __call__(self, *args, **kwargs): return self # Exception raised for yacc-related errors class YaccError(Exception): pass # Format the result message that the parser produces when running in debug mode. def format_result(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) > resultlimit: repr_str = repr_str[:resultlimit] + ' ...' result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) return result # Format stack entries when the parser is running in debug mode def format_stack_entry(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) < 16: return repr_str else: return '<%s @ 0x%x>' % (type(r).__name__, id(r)) # Panic mode error recovery support. This feature is being reworked--much of the # code here is to offer a deprecation/backwards compatible transition _errok = None _token = None _restart = None _warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). Instead, invoke the methods on the associated parser instance: def p_error(p): ... # Use parser.errok(), parser.token(), parser.restart() ... 
parser = yacc.yacc() ''' def errok(): warnings.warn(_warnmsg) return _errok() def restart(): warnings.warn(_warnmsg) return _restart() def token(): warnings.warn(_warnmsg) return _token() # Utility function to call the p_error() function with some deprecation hacks def call_errorfunc(errorfunc, token, parser): global _errok, _token, _restart _errok = parser.errok _token = parser.token _restart = parser.restart r = errorfunc(token) try: del _errok, _token, _restart except NameError: pass return r #----------------------------------------------------------------------------- # === LR Parsing Engine === # # The following classes are used for the LR parser itself. These are not # used during table construction and are independent of the actual LR # table generation algorithm #----------------------------------------------------------------------------- # This class is used to hold non-terminal grammar symbols during parsing. # It normally has the following attributes set: # .type = Grammar symbol type # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) # .lexpos = Starting lex position # .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: def __str__(self): return self.type def __repr__(self): return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the # .value attribute of the underlying YaccSymbol object. # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines # for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) # representing the range of positional information for a symbol. class YaccProduction: def __init__(self, s, stack=None): self.slice = s self.stack = stack self.lexer = None self.parser = None def __getitem__(self, n): if isinstance(n, slice): return [s.value for s in self.slice[n]] elif n >= 0: return self.slice[n].value else: return self.stack[n].value def __setitem__(self, n, v): self.slice[n].value = v def __getslice__(self, i, j): return [s.value for s in self.slice[i:j]] def __len__(self): return len(self.slice) def lineno(self, n): return getattr(self.slice[n], 'lineno', 0) def set_lineno(self, n, lineno): self.slice[n].lineno = lineno def linespan(self, n): startline = getattr(self.slice[n], 'lineno', 0) endline = getattr(self.slice[n], 'endlineno', startline) return startline, endline def lexpos(self, n): return getattr(self.slice[n], 'lexpos', 0) def lexspan(self, n): startpos = getattr(self.slice[n], 'lexpos', 0) endpos = getattr(self.slice[n], 'endlexpos', startpos) return startpos, endpos def error(self): raise SyntaxError # ----------------------------------------------------------------------------- # == LRParser == # # The LR Parsing engine. # ----------------------------------------------------------------------------- class LRParser: def __init__(self, lrtab, errorf): self.productions = lrtab.lr_productions self.action = lrtab.lr_action self.goto = lrtab.lr_goto self.errorfunc = errorf self.set_defaulted_states() self.errorok = True def errok(self): self.errorok = True def restart(self): del self.statestack[:] del self.symstack[:] sym = YaccSymbol() sym.type = '$end' self.symstack.append(sym) self.statestack.append(0) # Defaulted state support. # This method identifies parser states where there is only one possible reduction action. 
# For such states, the parser can make a choose to make a rule reduction without consuming # the next look-ahead token. This delayed invocation of the tokenizer can be useful in # certain kinds of advanced parsing situations where the lexer and parser interact with # each other or change states (i.e., manipulation of scope, lexer states, etc.). # # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions def set_defaulted_states(self): self.defaulted_states = {} for state, actions in self.action.items(): rules = list(actions.values()) if len(rules) == 1 and rules[0] < 0: self.defaulted_states[state] = rules[0] def disable_defaulted_states(self): self.defaulted_states = {} def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): if debug or yaccdevel: if isinstance(debug, int): debug = PlyLogger(sys.stderr) return self.parsedebug(input, lexer, debug, tracking, tokenfunc) elif tracking: return self.parseopt(input, lexer, debug, tracking, tokenfunc) else: return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parsedebug(). # # This is the debugging enabled version of parse(). All changes made to the # parsing engine should be made here. Optimized versions of this function # are automatically created by the ply/ygen.py script. This script cuts out # sections enclosed in markers such as this: # # #--! DEBUG # statements # #--! DEBUG # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parsedebug-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery #--! DEBUG debug.info('PLY: PARSE DEBUG START') #--! DEBUG # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer #--! DEBUG debug.debug('') debug.debug('State : %s', state) #--! 
DEBUG if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] #--! DEBUG debug.debug('Defaulted state %s: Reduce using %d', state, -t) #--! DEBUG #--! DEBUG debug.debug('Stack : %s', ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t #--! DEBUG debug.debug('Action : Shift and goto state %s', t) #--! DEBUG symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None #--! DEBUG if plen: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', goto[statestack[-1-plen]][pname]) else: debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], goto[statestack[-1]][pname]) #--! DEBUG if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) #--! DEBUG debug.info('Result : %s', format_result(pslice[0])) #--! DEBUG symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) #--! DEBUG debug.info('Done : Returning %s', format_result(result)) debug.info('PLY: PARSE DEBUG END') #--! DEBUG return result if t is None: #--! DEBUG debug.error('Error : %s', ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) #--! DEBUG # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! 
parsedebug-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt(). # # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! # This code is automatically generated by the ply/ygen.py script. Make # changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym #--! TRACKING if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) #--! TRACKING # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. 
Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: #--! TRACKING if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos #--! TRACKING targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue #--! TRACKING if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) #--! 
TRACKING lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() #--! TRACKING if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos #--! TRACKING statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-end # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # parseopt_notrack(). # # Optimized version of parseopt() with line number tracking removed. # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated # by the ply/ygen.py script. Make changes to the parsedebug() method instead. # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): #--! parseopt-notrack-start lookahead = None # Current lookahead symbol lookaheadstack = [] # Stack of lookahead symbols actions = self.action # Local reference to action table (to avoid lookup on self.) goto = self.goto # Local reference to goto table (to avoid lookup on self.) prod = self.productions # Local reference to production list (to avoid lookup on self.) defaulted_states = self.defaulted_states # Local reference to defaulted states pslice = YaccProduction(None) # Production object passed to grammar rules errorcount = 0 # Used during error recovery # If no lexer was given, we will try to use the lex module if not lexer: from . import lex lexer = lex.lexer # Set up the lexer and parser objects on pslice pslice.lexer = lexer pslice.parser = self # If input was supplied, pass to lexer if input is not None: lexer.input(input) if tokenfunc is None: # Tokenize function get_token = lexer.token else: get_token = tokenfunc # Set the parser() token method (sometimes used in error recovery) self.token = get_token # Set up the state and symbol stacks statestack = [] # Stack of parsing states self.statestack = statestack symstack = [] # Stack of grammar symbols self.symstack = symstack pslice.stack = symstack # Put in the production errtoken = None # Err token # The start state is assumed to be (0,$end) statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: # Get the next symbol on the input. If a lookahead symbol # is already set, we just use that. 
Otherwise, we'll pull # the next token off of the lookaheadstack or from the lexer if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() # Get the next token else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' # Check the action table ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: # shift a symbol on the stack statestack.append(t) state = t symstack.append(lookahead) lookahead = None # Decrease error count on successful shift if errorcount: errorcount -= 1 continue if t < 0: # reduce a symbol on the stack, emit a production p = prod[-t] pname = p.name plen = p.len # Get production function sym = YaccSymbol() sym.type = pname # Production name sym.value = None if plen: targ = symstack[-plen-1:] targ[0] = sym # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # below as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token symstack.extend(targ[1:-1]) # Put the production slice back on the stack statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! else: targ = [sym] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # The code enclosed in this section is duplicated # above as a performance optimization. Make sure # changes get made in both locations. pslice.slice = targ try: # Call the grammar rule with our special slice object self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: # If an error was set. Enter error recovery state lookaheadstack.append(lookahead) # Save the current lookahead token statestack.pop() # Pop back one state (before the reduce) state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: # We have some kind of parsing error here. To handle # this, we are going to push the current token onto # the tokenstack and replace it with an 'error' token. # If there are any synchronization rules, they may # catch it. # # In addition to pushing the error token, we call call # the user defined p_error() function if this is the # first syntax error. This function is only called if # errorcount == 0. if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None # End of file! if self.errorfunc: if errtoken and not hasattr(errtoken, 'lexer'): errtoken.lexer = lexer self.state = state tok = call_errorfunc(self.errorfunc, errtoken, self) if self.errorok: # User must have done some kind of panic # mode recovery on their own. 
The # returned token is the next lookahead lookahead = tok errtoken = None continue else: if errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. EOF\n') return else: errorcount = error_count # case 1: the statestack only has 1 entry on it. If we're in this state, the # entire parse has been rolled back and we're completely hosed. The token is # discarded and we just keep going. if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 # Nuke the pushback stack del lookaheadstack[:] continue # case 2: the statestack has a couple of entries on it, but we're # at the end of the file. nuke the top entry and generate an error token # Start nuking entries on the stack if lookahead.type == '$end': # Whoa. We're really hosed here. Bail out return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': # Hmmm. Error is on top of stack, we'll just nuke input # symbol and continue lookahead = None continue # Create the error symbol for the first time and make it the new lookahead symbol t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() statestack.pop() state = statestack[-1] continue # Call an error function here raise RuntimeError('yacc: internal parser error!!!\n') #--! parseopt-notrack-end # ----------------------------------------------------------------------------- # === Grammar Representation === # # The following functions, classes, and variables are used to represent and # manipulate the rules that make up a grammar. # ----------------------------------------------------------------------------- # regex matching identifiers _is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') # ----------------------------------------------------------------------------- # class Production: # # This class stores the raw information about a single production or grammar rule. # A grammar rule refers to a specification such as this: # # expr : expr PLUS term # # Here are the basic attributes defined on all productions # # name - Name of the production. For example 'expr' # prod - A list of symbols on the right side ['expr','PLUS','term'] # prec - Production precedence level # number - Production number. # func - Function that executes on reduce # file - File where production function is defined # lineno - Line number where production function is defined # # The following attributes are defined or optional. 
# # len - Length of the production (number of symbols on right hand side) # usyms - Set of unique symbols found in the production # ----------------------------------------------------------------------------- class Production(object): reduced = 0 def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0): self.name = name self.prod = tuple(prod) self.number = number self.func = func self.callable = None self.file = file self.line = line self.prec = precedence # Internal settings used during table construction self.len = len(self.prod) # Length of the production # Create a list of unique production symbols used in the production self.usyms = [] for s in self.prod: if s not in self.usyms: self.usyms.append(s) # List of all LR items for the production self.lr_items = [] self.lr_next = None # Create a string representation if self.prod: self.str = '%s -> %s' % (self.name, ' '.join(self.prod)) else: self.str = '%s -> <empty>' % self.name def __str__(self): return self.str def __repr__(self): return 'Production(' + str(self) + ')' def __len__(self): return len(self.prod) def __nonzero__(self): return 1 def __getitem__(self, index): return self.prod[index] # Return the nth lr_item from the production (or None if at the end) def lr_item(self, n): if n > len(self.prod): return None p = LRItem(self, n) # Precompute the list of productions immediately following. try: p.lr_after = Prodnames[p.prod[n+1]] except (IndexError, KeyError): p.lr_after = [] try: p.lr_before = p.prod[n-1] except IndexError: p.lr_before = None return p # Bind the production function name to a callable def bind(self, pdict): if self.func: self.callable = pdict[self.func] # This class serves as a minimal standin for Production objects when # reading table data from files. It only contains information # actually used by the LR parsing engine, plus some additional # debugging information. class MiniProduction(object): def __init__(self, str, name, len, func, file, line): self.name = name self.len = len self.func = func self.callable = None self.file = file self.line = line self.str = str def __str__(self): return self.str def __repr__(self): return 'MiniProduction(%s)' % self.str # Bind the production function name to a callable def bind(self, pdict): if self.func: self.callable = pdict[self.func] # ----------------------------------------------------------------------------- # class LRItem # # This class represents a specific stage of parsing a production rule. For # example: # # expr : expr . PLUS term # # In the above, the "." represents the current location of the parse. Here # basic attributes: # # name - Name of the production. For example 'expr' # prod - A list of symbols on the right side ['expr','.', 'PLUS','term'] # number - Production number. # # lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term' # then lr_next refers to 'expr -> expr PLUS . term' # lr_index - LR item index (location of the ".") in the prod list. 
# lookaheads - LALR lookahead symbols for this item # len - Length of the production (number of symbols on right hand side) # lr_after - List of all productions that immediately follow # lr_before - Grammar symbol immediately before # ----------------------------------------------------------------------------- class LRItem(object): def __init__(self, p, n): self.name = p.name self.prod = list(p.prod) self.number = p.number self.lr_index = n self.lookaheads = {} self.prod.insert(n, '.') self.prod = tuple(self.prod) self.len = len(self.prod) self.usyms = p.usyms def __str__(self): if self.prod: s = '%s -> %s' % (self.name, ' '.join(self.prod)) else: s = '%s -> <empty>' % self.name return s def __repr__(self): return 'LRItem(' + str(self) + ')' # ----------------------------------------------------------------------------- # rightmost_terminal() # # Return the rightmost terminal from a list of symbols. Used in add_production() # ----------------------------------------------------------------------------- def rightmost_terminal(symbols, terminals): i = len(symbols) - 1 while i >= 0: if symbols[i] in terminals: return symbols[i] i -= 1 return None # ----------------------------------------------------------------------------- # === GRAMMAR CLASS === # # The following class represents the contents of the specified grammar along # with various computed properties such as first sets, follow sets, LR items, etc. # This data is used for critical parts of the table generation process later. # ----------------------------------------------------------------------------- class GrammarError(YaccError): pass class Grammar(object): def __init__(self, terminals): self.Productions = [None] # A list of all of the productions. The first # entry is always reserved for the purpose of # building an augmented grammar self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all # productions of that nonterminal. self.Prodmap = {} # A dictionary that is only used to detect duplicate # productions. self.Terminals = {} # A dictionary mapping the names of terminal symbols to a # list of the rules where they are used. for term in terminals: self.Terminals[term] = [] self.Terminals['error'] = [] self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list # of rule numbers where they are used. self.First = {} # A dictionary of precomputed FIRST(x) symbols self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the # form ('right',level) or ('nonassoc', level) or ('left',level) self.UsedPrecedence = set() # Precedence rules that were actually used by the grammer. # This is only used to provide error checking and to generate # a warning about unused precedence rules. self.Start = None # Starting symbol for the grammar def __len__(self): return len(self.Productions) def __getitem__(self, index): return self.Productions[index] # ----------------------------------------------------------------------------- # set_precedence() # # Sets the precedence for a given terminal. assoc is the associativity such as # 'left','right', or 'nonassoc'. level is a numeric level. 
# # ----------------------------------------------------------------------------- def set_precedence(self, term, assoc, level): assert self.Productions == [None], 'Must call set_precedence() before add_production()' if term in self.Precedence: raise GrammarError('Precedence already specified for terminal %r' % term) if assoc not in ['left', 'right', 'nonassoc']: raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") self.Precedence[term] = (assoc, level) # ----------------------------------------------------------------------------- # add_production() # # Given an action function, this function assembles a production rule and # computes its precedence level. # # The production rule is supplied as a list of symbols. For example, # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and # symbols ['expr','PLUS','term']. # # Precedence is determined by the precedence of the right-most non-terminal # or the precedence of a terminal specified by %prec. # # A variety of error checks are performed to make sure production symbols # are valid and that %prec is used correctly. # ----------------------------------------------------------------------------- def add_production(self, prodname, syms, func=None, file='', line=0): if prodname in self.Terminals: raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) if prodname == 'error': raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) if not _is_identifier.match(prodname): raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) # Look for literal tokens for n, s in enumerate(syms): if s[0] in "'\"": try: c = eval(s) if (len(c) > 1): raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % (file, line, s, prodname)) if c not in self.Terminals: self.Terminals[c] = [] syms[n] = c continue except SyntaxError: pass if not _is_identifier.match(s) and s != '%prec': raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) # Determine the precedence level if '%prec' in syms: if syms[-1] == '%prec': raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) if syms[-2] != '%prec': raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % (file, line)) precname = syms[-1] prodprec = self.Precedence.get(precname) if not prodprec: raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) else: self.UsedPrecedence.add(precname) del syms[-2:] # Drop %prec from the rule else: # If no %prec, precedence is determined by the rightmost terminal symbol precname = rightmost_terminal(syms, self.Terminals) prodprec = self.Precedence.get(precname, ('right', 0)) # See if the rule is already in the rulemap map = '%s -> %s' % (prodname, syms) if map in self.Prodmap: m = self.Prodmap[map] raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + 'Previous definition at %s:%d' % (m.file, m.line)) # From this point on, everything is valid. 
Create a new Production instance pnumber = len(self.Productions) if prodname not in self.Nonterminals: self.Nonterminals[prodname] = [] # Add the production number to Terminals and Nonterminals for t in syms: if t in self.Terminals: self.Terminals[t].append(pnumber) else: if t not in self.Nonterminals: self.Nonterminals[t] = [] self.Nonterminals[t].append(pnumber) # Create a production and add it to the list of productions p = Production(pnumber, prodname, syms, prodprec, func, file, line) self.Productions.append(p) self.Prodmap[map] = p # Add to the global productions list try: self.Prodnames[prodname].append(p) except KeyError: self.Prodnames[prodname] = [p] # ----------------------------------------------------------------------------- # set_start() # # Sets the starting symbol and creates the augmented grammar. Production # rule 0 is S' -> start where start is the start symbol. # ----------------------------------------------------------------------------- def set_start(self, start=None): if not start: start = self.Productions[1].name if start not in self.Nonterminals: raise GrammarError('start symbol %s undefined' % start) self.Productions[0] = Production(0, "S'", [start]) self.Nonterminals[start].append(0) self.Start = start # ----------------------------------------------------------------------------- # find_unreachable() # # Find all of the nonterminal symbols that can't be reached from the starting # symbol. Returns a list of nonterminals that can't be reached. # ----------------------------------------------------------------------------- def find_unreachable(self): # Mark all symbols that are reachable from a symbol s def mark_reachable_from(s): if s in reachable: return reachable.add(s) for p in self.Prodnames.get(s, []): for r in p.prod: mark_reachable_from(r) reachable = set() mark_reachable_from(self.Productions[0].prod[0]) return [s for s in self.Nonterminals if s not in reachable] # ----------------------------------------------------------------------------- # infinite_cycles() # # This function looks at the various parsing rules and tries to detect # infinite recursion cycles (grammar rules where there is no possible way # to derive a string of only terminals). # ----------------------------------------------------------------------------- def infinite_cycles(self): terminates = {} # Terminals: for t in self.Terminals: terminates[t] = True terminates['$end'] = True # Nonterminals: # Initialize to false: for n in self.Nonterminals: terminates[n] = False # Then propagate termination until no change: while True: some_change = False for (n, pl) in self.Prodnames.items(): # Nonterminal n terminates iff any of its productions terminates. for p in pl: # Production p terminates iff all of its rhs symbols terminate. for s in p.prod: if not terminates[s]: # The symbol s does not terminate, # so production p does not terminate. p_terminates = False break else: # didn't break from the loop, # so every symbol s terminates # so production p terminates. p_terminates = True if p_terminates: # symbol n terminates! if not terminates[n]: terminates[n] = True some_change = True # Don't need to consider any more productions for this n. break if not some_change: break infinite = [] for (s, term) in terminates.items(): if not term: if s not in self.Prodnames and s not in self.Terminals and s != 'error': # s is used-but-not-defined, and we've already warned of that, # so it would be overkill to say that it's also non-terminating. 
pass else: infinite.append(s) return infinite # ----------------------------------------------------------------------------- # undefined_symbols() # # Find all symbols that were used the grammar, but not defined as tokens or # grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol # and prod is the production where the symbol was used. # ----------------------------------------------------------------------------- def undefined_symbols(self): result = [] for p in self.Productions: if not p: continue for s in p.prod: if s not in self.Prodnames and s not in self.Terminals and s != 'error': result.append((s, p)) return result # ----------------------------------------------------------------------------- # unused_terminals() # # Find all terminals that were defined, but not used by the grammar. Returns # a list of all symbols. # ----------------------------------------------------------------------------- def unused_terminals(self): unused_tok = [] for s, v in self.Terminals.items(): if s != 'error' and not v: unused_tok.append(s) return unused_tok # ------------------------------------------------------------------------------ # unused_rules() # # Find all grammar rules that were defined, but not used (maybe not reachable) # Returns a list of productions. # ------------------------------------------------------------------------------ def unused_rules(self): unused_prod = [] for s, v in self.Nonterminals.items(): if not v: p = self.Prodnames[s][0] unused_prod.append(p) return unused_prod # ----------------------------------------------------------------------------- # unused_precedence() # # Returns a list of tuples (term,precedence) corresponding to precedence # rules that were never used by the grammar. term is the name of the terminal # on which precedence was applied and precedence is a string such as 'left' or # 'right' corresponding to the type of precedence. # ----------------------------------------------------------------------------- def unused_precedence(self): unused = [] for termname in self.Precedence: if not (termname in self.Terminals or termname in self.UsedPrecedence): unused.append((termname, self.Precedence[termname][0])) return unused # ------------------------------------------------------------------------- # _first() # # Compute the value of FIRST1(beta) where beta is a tuple of symbols. # # During execution of compute_first1, the result may be incomplete. # Afterward (e.g., when called from compute_follow()), it will be complete. # ------------------------------------------------------------------------- def _first(self, beta): # We are computing First(x1,x2,x3,...,xn) result = [] for x in beta: x_produces_empty = False # Add all the non-<empty> symbols of First[x] to the result. for f in self.First[x]: if f == '<empty>': x_produces_empty = True else: if f not in result: result.append(f) if x_produces_empty: # We have to consider the next x in beta, # i.e. stay in the loop. pass else: # We don't have to consider any further symbols in beta. break else: # There was no 'break' from the loop, # so x_produces_empty was true for all x in beta, # so beta produces empty as well. 
result.append('<empty>') return result # ------------------------------------------------------------------------- # compute_first() # # Compute the value of FIRST1(X) for all symbols # ------------------------------------------------------------------------- def compute_first(self): if self.First: return self.First # Terminals: for t in self.Terminals: self.First[t] = [t] self.First['$end'] = ['$end'] # Nonterminals: # Initialize to the empty set: for n in self.Nonterminals: self.First[n] = [] # Then propagate symbols until no change: while True: some_change = False for n in self.Nonterminals: for p in self.Prodnames[n]: for f in self._first(p.prod): if f not in self.First[n]: self.First[n].append(f) some_change = True if not some_change: break return self.First # --------------------------------------------------------------------- # compute_follow() # # Computes all of the follow sets for every non-terminal symbol. The # follow set is the set of all symbols that might follow a given # non-terminal. See the Dragon book, 2nd Ed. p. 189. # --------------------------------------------------------------------- def compute_follow(self, start=None): # If already computed, return the result if self.Follow: return self.Follow # If first sets not computed yet, do that first. if not self.First: self.compute_first() # Add '$end' to the follow list of the start symbol for k in self.Nonterminals: self.Follow[k] = [] if not start: start = self.Productions[1].name self.Follow[start] = ['$end'] while True: didadd = False for p in self.Productions[1:]: # Here is the production set for i, B in enumerate(p.prod): if B in self.Nonterminals: # Okay. We got a non-terminal in a production fst = self._first(p.prod[i+1:]) hasempty = False for f in fst: if f != '<empty>' and f not in self.Follow[B]: self.Follow[B].append(f) didadd = True if f == '<empty>': hasempty = True if hasempty or i == (len(p.prod)-1): # Add elements of follow(a) to follow(b) for f in self.Follow[p.name]: if f not in self.Follow[B]: self.Follow[B].append(f) didadd = True if not didadd: break return self.Follow # ----------------------------------------------------------------------------- # build_lritems() # # This function walks the list of productions and builds a complete set of the # LR items. The LR items are stored in two ways: First, they are uniquely # numbered and placed in the list _lritems. Second, a linked list of LR items # is built for each production. For example: # # E -> E PLUS E # # Creates the list # # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ] # ----------------------------------------------------------------------------- def build_lritems(self): for p in self.Productions: lastlri = p i = 0 lr_items = [] while True: if i > len(p): lri = None else: lri = LRItem(p, i) # Precompute the list of productions immediately following try: lri.lr_after = self.Prodnames[lri.prod[i+1]] except (IndexError, KeyError): lri.lr_after = [] try: lri.lr_before = lri.prod[i-1] except IndexError: lri.lr_before = None lastlri.lr_next = lri if not lri: break lr_items.append(lri) lastlri = lri i += 1 p.lr_items = lr_items # ----------------------------------------------------------------------------- # == Class LRTable == # # This basic class represents a basic table of LR parsing information. # Methods for generating the tables are not defined here. They are defined # in the derived class LRGeneratedTable. 
# ----------------------------------------------------------------------------- class VersionError(YaccError): pass class LRTable(object): def __init__(self): self.lr_action = None self.lr_goto = None self.lr_productions = None self.lr_method = None def read_table(self, module): if isinstance(module, types.ModuleType): parsetab = module else: exec('import %s' % module) parsetab = sys.modules[module] if parsetab._tabversion != __tabversion__: raise VersionError('yacc table file version is out of date') self.lr_action = parsetab._lr_action self.lr_goto = parsetab._lr_goto self.lr_productions = [] for p in parsetab._lr_productions: self.lr_productions.append(MiniProduction(*p)) self.lr_method = parsetab._lr_method return parsetab._lr_signature def read_pickle(self, filename): try: import cPickle as pickle except ImportError: import pickle if not os.path.exists(filename): raise ImportError in_f = open(filename, 'rb') tabversion = pickle.load(in_f) if tabversion != __tabversion__: raise VersionError('yacc table file version is out of date') self.lr_method = pickle.load(in_f) signature = pickle.load(in_f) self.lr_action = pickle.load(in_f) self.lr_goto = pickle.load(in_f) productions = pickle.load(in_f) self.lr_productions = [] for p in productions: self.lr_productions.append(MiniProduction(*p)) in_f.close() return signature # Bind all production function names to callable objects in pdict def bind_callables(self, pdict): for p in self.lr_productions: p.bind(pdict) # ----------------------------------------------------------------------------- # === LR Generator === # # The following classes and functions are used to generate LR parsing tables on # a grammar. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # digraph() # traverse() # # The following two functions are used to compute set valued functions # of the form: # # F(x) = F'(x) U U{F(y) | x R y} # # This is used to compute the values of Read() sets as well as FOLLOW sets # in LALR(1) generation. # # Inputs: X - An input set # R - A relation # FP - Set-valued function # ------------------------------------------------------------------------------ def digraph(X, R, FP): N = {} for x in X: N[x] = 0 stack = [] F = {} for x in X: if N[x] == 0: traverse(x, N, stack, F, X, R, FP) return F def traverse(x, N, stack, F, X, R, FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) # F(X) <- F'(x) rel = R(x) # Get y's related to x for y in rel: if N[y] == 0: traverse(y, N, stack, F, X, R, FP) N[x] = min(N[x], N[y]) for a in F.get(y, []): if a not in F[x]: F[x].append(a) if N[x] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() while element != x: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() class LALRError(YaccError): pass # ----------------------------------------------------------------------------- # == LRGeneratedTable == # # This class implements the LR table generation algorithm. 
There are no # public methods except for write() # ----------------------------------------------------------------------------- class LRGeneratedTable(LRTable): def __init__(self, grammar, method='LALR', log=None): if method not in ['SLR', 'LALR']: raise LALRError('Unsupported method %s' % method) self.grammar = grammar self.lr_method = method # Set up the logger if not log: log = NullLogger() self.log = log # Internal attributes self.lr_action = {} # Action table self.lr_goto = {} # Goto table self.lr_productions = grammar.Productions # Copy of grammar Production array self.lr_goto_cache = {} # Cache of computed gotos self.lr0_cidhash = {} # Cache of closures self._add_count = 0 # Internal counter used to detect cycles # Diagonistic information filled in by the table generator self.sr_conflict = 0 self.rr_conflict = 0 self.conflicts = [] # List of conflicts self.sr_conflicts = [] self.rr_conflicts = [] # Build the tables self.grammar.build_lritems() self.grammar.compute_first() self.grammar.compute_follow() self.lr_parse_table() # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. def lr0_closure(self, I): self._add_count += 1 # Add everything in I to J J = I[:] didadd = True while didadd: didadd = False for j in J: for x in j.lr_after: if getattr(x, 'lr0_added', 0) == self._add_count: continue # Add B --> .G to J J.append(x.lr_next) x.lr0_added = self._add_count didadd = True return J # Compute the LR(0) goto function goto(I,X) where I is a set # of LR(0) items and X is a grammar symbol. This function is written # in a way that guarantees uniqueness of the generated goto sets # (i.e. the same goto set will never be returned as two different Python # objects). With uniqueness, we can later do fast set comparisons using # id(obj) instead of element-wise comparison. def lr0_goto(self, I, x): # First we look for a previously cached entry g = self.lr_goto_cache.get((id(I), x)) if g: return g # Now we generate the goto set in a way that guarantees uniqueness # of the result s = self.lr_goto_cache.get(x) if not s: s = {} self.lr_goto_cache[x] = s gs = [] for p in I: n = p.lr_next if n and n.lr_before == x: s1 = s.get(id(n)) if not s1: s1 = {} s[id(n)] = s1 gs.append(n) s = s1 g = s.get('$end') if not g: if gs: g = self.lr0_closure(gs) s['$end'] = g else: s['$end'] = gs self.lr_goto_cache[(id(I), x)] = g return g # Compute the LR(0) sets of item function def lr0_items(self): C = [self.lr0_closure([self.grammar.Productions[0].lr_next])] i = 0 for I in C: self.lr0_cidhash[id(I)] = i i += 1 # Loop over the items in C and each grammar symbols i = 0 while i < len(C): I = C[i] i += 1 # Collect all of the symbols that could possibly be in the goto(I,X) sets asyms = {} for ii in I: for s in ii.usyms: asyms[s] = None for x in asyms: g = self.lr0_goto(I, x) if not g or id(g) in self.lr0_cidhash: continue self.lr0_cidhash[id(g)] = len(C) C.append(g) return C # ----------------------------------------------------------------------------- # ==== LALR(1) Parsing ==== # # LALR(1) parsing is almost exactly the same as SLR except that instead of # relying upon Follow() sets when performing reductions, a more selective # lookahead set that incorporates the state of the LR(0) machine is utilized. # Thus, we mainly just have to focus on calculating the lookahead sets. # # The method used here is due to DeRemer and Pennelo (1982). # # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1) # Lookahead Sets", ACM Transactions on Programming Languages and Systems, # Vol. 4, No. 
4, Oct. 1982, pp. 615-649 # # Further details can also be found in: # # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", # McGraw-Hill Book Company, (1985). # # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # compute_nullable_nonterminals() # # Creates a dictionary containing all of the non-terminals that might produce # an empty production. # ----------------------------------------------------------------------------- def compute_nullable_nonterminals(self): nullable = set() num_nullable = 0 while True: for p in self.grammar.Productions[1:]: if p.len == 0: nullable.add(p.name) continue for t in p.prod: if t not in nullable: break else: nullable.add(p.name) if len(nullable) == num_nullable: break num_nullable = len(nullable) return nullable # ----------------------------------------------------------------------------- # find_nonterminal_trans(C) # # Given a set of LR(0) items, this functions finds all of the non-terminal # transitions. These are transitions in which a dot appears immediately before # a non-terminal. Returns a list of tuples of the form (state,N) where state # is the state number and N is the nonterminal symbol. # # The input C is the set of LR(0) items. # ----------------------------------------------------------------------------- def find_nonterminal_transitions(self, C): trans = [] for stateno, state in enumerate(C): for p in state: if p.lr_index < p.len - 1: t = (stateno, p.prod[p.lr_index+1]) if t[1] in self.grammar.Nonterminals: if t not in trans: trans.append(t) return trans # ----------------------------------------------------------------------------- # dr_relation() # # Computes the DR(p,A) relationships for non-terminal transitions. The input # is a tuple (state,N) where state is a number and N is a nonterminal symbol. # # Returns a list of terminals. # ----------------------------------------------------------------------------- def dr_relation(self, C, trans, nullable): dr_set = {} state, N = trans terms = [] g = self.lr0_goto(C[state], N) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index+1] if a in self.grammar.Terminals: if a not in terms: terms.append(a) # This extra bit is to handle the start state if state == 0 and N == self.grammar.Productions[0].prod[0]: terms.append('$end') return terms # ----------------------------------------------------------------------------- # reads_relation() # # Computes the READS() relation (p,A) READS (t,C). # ----------------------------------------------------------------------------- def reads_relation(self, C, trans, empty): # Look for empty transitions rel = [] state, N = trans g = self.lr0_goto(C[state], N) j = self.lr0_cidhash.get(id(g), -1) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index + 1] if a in empty: rel.append((j, a)) return rel # ----------------------------------------------------------------------------- # compute_lookback_includes() # # Determines the lookback and includes relations # # LOOKBACK: # # This relation is determined by running the LR(0) state machine forward. # For example, starting with a production "N : . A B C", we run it forward # to obtain "N : A B C ." We then build a relationship between this final # state and the starting state. These relationships are stored in a dictionary # lookdict. # # INCLUDES: # # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). 
# # This relation is used to determine non-terminal transitions that occur # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) # if the following holds: # # B -> LAT, where T -> epsilon and p' -L-> p # # L is essentially a prefix (which may be empty), T is a suffix that must be # able to derive an empty string. State p' must lead to state p with the string L. # # ----------------------------------------------------------------------------- def compute_lookback_includes(self, C, trans, nullable): lookdict = {} # Dictionary of lookback relations includedict = {} # Dictionary of include relations # Make a dictionary of non-terminal transitions dtrans = {} for t in trans: dtrans[t] = 1 # Loop over all transitions and compute lookbacks and includes for state, N in trans: lookb = [] includes = [] for p in C[state]: if p.name != N: continue # Okay, we have a name match. We now follow the production all the way # through the state machine until we get the . on the right hand side lr_index = p.lr_index j = state while lr_index < p.len - 1: lr_index = lr_index + 1 t = p.prod[lr_index] # Check to see if this symbol and state are a non-terminal transition if (j, t) in dtrans: # Yes. Okay, there is some chance that this is an includes relation # the only way to know for certain is whether the rest of the # production derives empty li = lr_index + 1 while li < p.len: if p.prod[li] in self.grammar.Terminals: break # No forget it if p.prod[li] not in nullable: break li = li + 1 else: # Appears to be a relation between (j,t) and (state,N) includes.append((j, t)) g = self.lr0_goto(C[j], t) # Go to next set j = self.lr0_cidhash.get(id(g), -1) # Go to next state # When we get here, j is the final state, now we have to locate the production for r in C[j]: if r.name != p.name: continue if r.len != p.len: continue i = 0 # This look is comparing a production ". A B C" with "A B C ." while i < r.lr_index: if r.prod[i] != p.prod[i+1]: break i = i + 1 else: lookb.append((j, r)) for i in includes: if i not in includedict: includedict[i] = [] includedict[i].append((state, N)) lookdict[(state, N)] = lookb return lookdict, includedict # ----------------------------------------------------------------------------- # compute_read_sets() # # Given a set of LR(0) items, this function computes the read sets. 
# # Inputs: C = Set of LR(0) items # ntrans = Set of nonterminal transitions # nullable = Set of empty transitions # # Returns a set containing the read sets # ----------------------------------------------------------------------------- def compute_read_sets(self, C, ntrans, nullable): FP = lambda x: self.dr_relation(C, x, nullable) R = lambda x: self.reads_relation(C, x, nullable) F = digraph(ntrans, R, FP) return F # ----------------------------------------------------------------------------- # compute_follow_sets() # # Given a set of LR(0) items, a set of non-terminal transitions, a readset, # and an include set, this function computes the follow sets # # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} # # Inputs: # ntrans = Set of nonterminal transitions # readsets = Readset (previously computed) # inclsets = Include sets (previously computed) # # Returns a set containing the follow sets # ----------------------------------------------------------------------------- def compute_follow_sets(self, ntrans, readsets, inclsets): FP = lambda x: readsets[x] R = lambda x: inclsets.get(x, []) F = digraph(ntrans, R, FP) return F # ----------------------------------------------------------------------------- # add_lookaheads() # # Attaches the lookahead symbols to grammar rules. # # Inputs: lookbacks - Set of lookback relations # followset - Computed follow set # # This function directly attaches the lookaheads to productions contained # in the lookbacks set # ----------------------------------------------------------------------------- def add_lookaheads(self, lookbacks, followset): for trans, lb in lookbacks.items(): # Loop over productions in lookback for state, p in lb: if state not in p.lookaheads: p.lookaheads[state] = [] f = followset.get(trans, []) for a in f: if a not in p.lookaheads[state]: p.lookaheads[state].append(a) # ----------------------------------------------------------------------------- # add_lalr_lookaheads() # # This function does all of the work of adding lookahead information for use # with LALR parsing # ----------------------------------------------------------------------------- def add_lalr_lookaheads(self, C): # Determine all of the nullable nonterminals nullable = self.compute_nullable_nonterminals() # Find all non-terminal transitions trans = self.find_nonterminal_transitions(C) # Compute read sets readsets = self.compute_read_sets(C, trans, nullable) # Compute lookback/includes relations lookd, included = self.compute_lookback_includes(C, trans, nullable) # Compute LALR FOLLOW sets followsets = self.compute_follow_sets(trans, readsets, included) # Add all of the lookaheads self.add_lookaheads(lookd, followsets) # ----------------------------------------------------------------------------- # lr_parse_table() # # This function constructs the parse tables for SLR or LALR # ----------------------------------------------------------------------------- def lr_parse_table(self): Productions = self.grammar.Productions Precedence = self.grammar.Precedence goto = self.lr_goto # Goto array action = self.lr_action # Action array log = self.log # Logger for output actionp = {} # Action production array (temporary) log.info('Parsing method: %s', self.lr_method) # Step 1: Construct C = { I0, I1, ... 
IN}, collection of LR(0) items # This determines the number of states C = self.lr0_items() if self.lr_method == 'LALR': self.add_lalr_lookaheads(C) # Build the parser table, state by state st = 0 for I in C: # Loop over each production in I actlist = [] # List of actions st_action = {} st_actionp = {} st_goto = {} log.info('') log.info('state %d', st) log.info('') for p in I: log.info(' (%d) %s', p.number, p) log.info('') for p in I: if p.len == p.lr_index + 1: if p.name == "S'": # Start symbol. Accept! st_action['$end'] = 0 st_actionp['$end'] = p else: # We are at the end of a production. Reduce! if self.lr_method == 'LALR': laheads = p.lookaheads[st] else: laheads = self.grammar.Follow[p.name] for a in laheads: actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) r = st_action.get(a) if r is not None: # Whoa. Have a shift/reduce or reduce/reduce conflict if r > 0: # Need to decide on shift or reduce here # By default we favor shifting. Need to add # some precedence rules here. sprec, slevel = Productions[st_actionp[a].number].prec rprec, rlevel = Precedence.get(a, ('right', 0)) if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): # We really need to reduce here. st_action[a] = -p.number st_actionp[a] = p if not slevel and not rlevel: log.info(' ! shift/reduce conflict for %s resolved as reduce', a) self.sr_conflicts.append((st, a, 'reduce')) Productions[p.number].reduced += 1 elif (slevel == rlevel) and (rprec == 'nonassoc'): st_action[a] = None else: # Hmmm. Guess we'll keep the shift if not rlevel: log.info(' ! shift/reduce conflict for %s resolved as shift', a) self.sr_conflicts.append((st, a, 'shift')) elif r < 0: # Reduce/reduce conflict. In this case, we favor the rule # that was defined first in the grammar file oldp = Productions[-r] pp = Productions[p.number] if oldp.line > pp.line: st_action[a] = -p.number st_actionp[a] = p chosenp, rejectp = pp, oldp Productions[p.number].reduced += 1 Productions[oldp.number].reduced -= 1 else: chosenp, rejectp = oldp, pp self.rr_conflicts.append((st, chosenp, rejectp)) log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', a, st_actionp[a].number, st_actionp[a]) else: raise LALRError('Unknown conflict in state %d' % st) else: st_action[a] = -p.number st_actionp[a] = p Productions[p.number].reduced += 1 else: i = p.lr_index a = p.prod[i+1] # Get symbol right after the "." if a in self.grammar.Terminals: g = self.lr0_goto(I, a) j = self.lr0_cidhash.get(id(g), -1) if j >= 0: # We are in a shift state actlist.append((a, p, 'shift and go to state %d' % j)) r = st_action.get(a) if r is not None: # Whoa have a shift/reduce or shift/shift conflict if r > 0: if r != j: raise LALRError('Shift/shift conflict in state %d' % st) elif r < 0: # Do a precedence check. # - if precedence of reduce rule is higher, we reduce. # - if precedence of reduce is same and left assoc, we reduce. # - otherwise we shift rprec, rlevel = Productions[st_actionp[a].number].prec sprec, slevel = Precedence.get(a, ('right', 0)) if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): # We decide to shift here... highest precedence to shift Productions[st_actionp[a].number].reduced -= 1 st_action[a] = j st_actionp[a] = p if not rlevel: log.info(' ! shift/reduce conflict for %s resolved as shift', a) self.sr_conflicts.append((st, a, 'shift')) elif (slevel == rlevel) and (rprec == 'nonassoc'): st_action[a] = None else: # Hmmm. Guess we'll keep the reduce if not slevel and not rlevel: log.info(' ! 
shift/reduce conflict for %s resolved as reduce', a) self.sr_conflicts.append((st, a, 'reduce')) else: raise LALRError('Unknown conflict in state %d' % st) else: st_action[a] = j st_actionp[a] = p # Print the actions associated with each terminal _actprint = {} for a, p, m in actlist: if a in st_action: if p is st_actionp[a]: log.info(' %-15s %s', a, m) _actprint[(a, m)] = 1 log.info('') # Print the actions that were not used. (debugging) not_used = 0 for a, p, m in actlist: if a in st_action: if p is not st_actionp[a]: if not (a, m) in _actprint: log.debug(' ! %-15s [ %s ]', a, m) not_used = 1 _actprint[(a, m)] = 1 if not_used: log.debug('') # Construct the goto table for this state nkeys = {} for ii in I: for s in ii.usyms: if s in self.grammar.Nonterminals: nkeys[s] = None for n in nkeys: g = self.lr0_goto(I, n) j = self.lr0_cidhash.get(id(g), -1) if j >= 0: st_goto[n] = j log.info(' %-30s shift and go to state %d', n, j) action[st] = st_action actionp[st] = st_actionp goto[st] = st_goto st += 1 # ----------------------------------------------------------------------------- # write() # # This function writes the LR parsing tables to a file # ----------------------------------------------------------------------------- def write_table(self, tabmodule, outputdir='', signature=''): if isinstance(tabmodule, types.ModuleType): raise IOError("Won't overwrite existing tabmodule") basemodulename = tabmodule.split('.')[-1] filename = os.path.join(outputdir, basemodulename) + '.py' try: f = open(filename, 'w') f.write(''' # %s # This file is automatically generated. Do not edit. _tabversion = %r _lr_method = %r _lr_signature = %r ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) # Change smaller to 0 to go back to original tables smaller = 1 # Factor out names to try and make smaller if smaller: items = {} for s, nd in self.lr_action.items(): for name, v in nd.items(): i = items.get(name) if not i: i = ([], []) items[name] = i i[0].append(s) i[1].append(v) f.write('\n_lr_action_items = {') for k, v in items.items(): f.write('%r:([' % k) for i in v[0]: f.write('%r,' % i) f.write('],[') for i in v[1]: f.write('%r,' % i) f.write(']),') f.write('}\n') f.write(''' _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items ''') else: f.write('\n_lr_action = { ') for k, v in self.lr_action.items(): f.write('(%r,%r):%r,' % (k[0], k[1], v)) f.write('}\n') if smaller: # Factor out names to try and make smaller items = {} for s, nd in self.lr_goto.items(): for name, v in nd.items(): i = items.get(name) if not i: i = ([], []) items[name] = i i[0].append(s) i[1].append(v) f.write('\n_lr_goto_items = {') for k, v in items.items(): f.write('%r:([' % k) for i in v[0]: f.write('%r,' % i) f.write('],[') for i in v[1]: f.write('%r,' % i) f.write(']),') f.write('}\n') f.write(''' _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items ''') else: f.write('\n_lr_goto = { ') for k, v in self.lr_goto.items(): f.write('(%r,%r):%r,' % (k[0], k[1], v)) f.write('}\n') # Write production table f.write('_lr_productions = [\n') for p in self.lr_productions: if p.func: f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) else: f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) f.write(']\n') f.close() except 
IOError as e: raise # ----------------------------------------------------------------------------- # pickle_table() # # This function pickles the LR parsing tables to a supplied file object # ----------------------------------------------------------------------------- def pickle_table(self, filename, signature=''): try: import cPickle as pickle except ImportError: import pickle with open(filename, 'wb') as outf: pickle.dump(__tabversion__, outf, pickle_protocol) pickle.dump(self.lr_method, outf, pickle_protocol) pickle.dump(signature, outf, pickle_protocol) pickle.dump(self.lr_action, outf, pickle_protocol) pickle.dump(self.lr_goto, outf, pickle_protocol) outp = [] for p in self.lr_productions: if p.func: outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) else: outp.append((str(p), p.name, p.len, None, None, None)) pickle.dump(outp, outf, pickle_protocol) # ----------------------------------------------------------------------------- # === INTROSPECTION === # # The following functions and classes are used to implement the PLY # introspection features followed by the yacc() function itself. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. This is used to get the environment # associated with the yacc() call if none was provided. # ----------------------------------------------------------------------------- def get_caller_module_dict(levels): f = sys._getframe(levels) ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # parse_grammar() # # This takes a raw grammar rule string and parses it into production data # ----------------------------------------------------------------------------- def parse_grammar(doc, file, line): grammar = [] # Split the doc string into lines pstrings = doc.splitlines() lastp = None dline = line for ps in pstrings: dline += 1 p = ps.split() if not p: continue try: if p[0] == '|': # This is a continuation of a previous rule if not lastp: raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) prodname = lastp syms = p[1:] else: prodname = p[0] lastp = prodname syms = p[2:] assign = p[1] if assign != ':' and assign != '::=': raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) grammar.append((file, dline, prodname, syms)) except SyntaxError: raise except Exception: raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) return grammar # ----------------------------------------------------------------------------- # ParserReflect() # # This class represents information extracted for building a parser including # start symbol, error function, tokens, precedence list, action functions, # etc. 
# ----------------------------------------------------------------------------- class ParserReflect(object): def __init__(self, pdict, log=None): self.pdict = pdict self.start = None self.error_func = None self.tokens = None self.modules = set() self.grammar = [] self.error = False if log is None: self.log = PlyLogger(sys.stderr) else: self.log = log # Get all of the basic information def get_all(self): self.get_start() self.get_error_func() self.get_tokens() self.get_precedence() self.get_pfunctions() # Validate all of the information def validate_all(self): self.validate_start() self.validate_error_func() self.validate_tokens() self.validate_precedence() self.validate_pfunctions() self.validate_modules() return self.error # Compute a signature over the grammar def signature(self): try: from hashlib import md5 except ImportError: from md5 import md5 try: sig = md5() if self.start: sig.update(self.start.encode('latin-1')) if self.prec: sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1')) if self.tokens: sig.update(' '.join(self.tokens).encode('latin-1')) for f in self.pfuncs: if f[3]: sig.update(f[3].encode('latin-1')) except (TypeError, ValueError): pass digest = base64.b16encode(sig.digest()) if sys.version_info[0] >= 3: digest = digest.decode('latin-1') return digest # ----------------------------------------------------------------------------- # validate_modules() # # This method checks to see if there are duplicated p_rulename() functions # in the parser module file. Without this function, it is really easy for # users to make mistakes by cutting and pasting code fragments (and it's a real # bugger to try and figure out why the resulting parser doesn't work). Therefore, # we just do a little regular expression pattern matching of def statements # to try and detect duplicates. # ----------------------------------------------------------------------------- def validate_modules(self): # Match def p_funcname( fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') for module in self.modules: try: lines, linen = inspect.getsourcelines(module) except IOError: continue counthash = {} for linen, line in enumerate(lines): linen += 1 m = fre.match(line) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: filename = inspect.getsourcefile(module) self.log.warning('%s:%d: Function %s redefined. 
Previously defined on line %d', filename, linen, name, prev) # Get the start symbol def get_start(self): self.start = self.pdict.get('start') # Validate the start symbol def validate_start(self): if self.start is not None: if not isinstance(self.start, string_types): self.log.error("'start' must be a string") # Look for error handler def get_error_func(self): self.error_func = self.pdict.get('p_error') # Validate the error function def validate_error_func(self): if self.error_func: if isinstance(self.error_func, types.FunctionType): ismethod = 0 elif isinstance(self.error_func, types.MethodType): ismethod = 1 else: self.log.error("'p_error' defined, but is not a function or method") self.error = True return eline = self.error_func.__code__.co_firstlineno efile = self.error_func.__code__.co_filename module = inspect.getmodule(self.error_func) self.modules.add(module) argcount = self.error_func.__code__.co_argcount - ismethod if argcount != 1: self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) self.error = True # Get the tokens map def get_tokens(self): tokens = self.pdict.get('tokens') if not tokens: self.log.error('No token list is defined') self.error = True return if not isinstance(tokens, (list, tuple)): self.log.error('tokens must be a list or tuple') self.error = True return if not tokens: self.log.error('tokens is empty') self.error = True return self.tokens = tokens # Validate the tokens def validate_tokens(self): # Validate the tokens. if 'error' in self.tokens: self.log.error("Illegal token name 'error'. Is a reserved word") self.error = True return terminals = set() for n in self.tokens: if n in terminals: self.log.warning('Token %r multiply defined', n) terminals.add(n) # Get the precedence map (if any) def get_precedence(self): self.prec = self.pdict.get('precedence') # Validate and parse the precedence map def validate_precedence(self): preclist = [] if self.prec: if not isinstance(self.prec, (list, tuple)): self.log.error('precedence must be a list or tuple') self.error = True return for level, p in enumerate(self.prec): if not isinstance(p, (list, tuple)): self.log.error('Bad precedence table') self.error = True return if len(p) < 2: self.log.error('Malformed precedence entry %s. 
Must be (assoc, term, ..., term)', p) self.error = True return assoc = p[0] if not isinstance(assoc, string_types): self.log.error('precedence associativity must be a string') self.error = True return for term in p[1:]: if not isinstance(term, string_types): self.log.error('precedence items must be strings') self.error = True return preclist.append((term, assoc, level+1)) self.preclist = preclist # Get all p_functions from the grammar def get_pfunctions(self): p_functions = [] for name, item in self.pdict.items(): if not name.startswith('p_') or name == 'p_error': continue if isinstance(item, (types.FunctionType, types.MethodType)): line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) module = inspect.getmodule(item) p_functions.append((line, module, name, item.__doc__)) # Sort all of the actions by line number; make sure to stringify # modules to make them sortable, since `line` may not uniquely sort all # p functions p_functions.sort(key=lambda p_function: ( p_function[0], str(p_function[1]), p_function[2], p_function[3])) self.pfuncs = p_functions # Validate all of the p_functions def validate_pfunctions(self): grammar = [] # Check for non-empty symbols if len(self.pfuncs) == 0: self.log.error('no rules of the form p_rulename are defined') self.error = True return for line, module, name, doc in self.pfuncs: file = inspect.getsourcefile(module) func = self.pdict[name] if isinstance(func, types.MethodType): reqargs = 2 else: reqargs = 1 if func.__code__.co_argcount > reqargs: self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) self.error = True elif func.__code__.co_argcount < reqargs: self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) self.error = True elif not func.__doc__: self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', file, line, func.__name__) else: try: parsed_g = parse_grammar(doc, file, line) for g in parsed_g: grammar.append((name, g)) except SyntaxError as e: self.log.error(str(e)) self.error = True # Looks like a valid grammar rule # Mark the file in which defined. self.modules.add(module) # Secondary validation step that looks for p_ definitions that are not functions # or functions that look like they might be grammar rules. 
for n, v in self.pdict.items(): if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): continue if n.startswith('t_'): continue if n.startswith('p_') and n != 'p_error': self.log.warning('%r not defined as a function', n) if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)): if v.__doc__: try: doc = v.__doc__.split(' ') if doc[1] == ':': self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', v.__code__.co_filename, v.__code__.co_firstlineno, n) except IndexError: pass self.grammar = grammar # ----------------------------------------------------------------------------- # yacc(module) # # Build a parser # ----------------------------------------------------------------------------- def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file, outputdir=None, debuglog=None, errorlog=None, picklefile=None): if tabmodule is None: tabmodule = tab_module # Reference to the parsing method of the last built parser global parse # If pickling is enabled, table files are not created if picklefile: write_tables = 0 if errorlog is None: errorlog = PlyLogger(sys.stderr) # Get the module dictionary used for the parser if module: _items = [(k, getattr(module, k)) for k in dir(module)] pdict = dict(_items) # If no __file__ attribute is available, try to obtain it from the __module__ instead if '__file__' not in pdict: pdict['__file__'] = sys.modules[pdict['__module__']].__file__ else: pdict = get_caller_module_dict(2) if outputdir is None: # If no output directory is set, the location of the output files # is determined according to the following rules: # - If tabmodule specifies a package, files go into that package directory # - Otherwise, files go in the same directory as the specifying module if isinstance(tabmodule, types.ModuleType): srcfile = tabmodule.__file__ else: if '.' not in tabmodule: srcfile = pdict['__file__'] else: parts = tabmodule.split('.') pkgname = '.'.join(parts[:-1]) exec('import %s' % pkgname) srcfile = getattr(sys.modules[pkgname], '__file__', '') outputdir = os.path.dirname(srcfile) # Determine if the module is package of a package or not. # If so, fix the tabmodule setting so that tables load correctly pkg = pdict.get('__package__') if pkg and isinstance(tabmodule, str): if '.' not in tabmodule: tabmodule = pkg + '.' + tabmodule # Set start symbol if it's specified directly using an argument if start is not None: pdict['start'] = start # Collect parser information from the dictionary pinfo = ParserReflect(pdict, log=errorlog) pinfo.get_all() if pinfo.error: raise YaccError('Unable to build parser') # Check signature against table files (if any) signature = pinfo.signature() # Read the tables try: lr = LRTable() if picklefile: read_signature = lr.read_pickle(picklefile) else: read_signature = lr.read_table(tabmodule) if optimize or (read_signature == signature): try: lr.bind_callables(pinfo.pdict) parser = LRParser(lr, pinfo.error_func) parse = parser.parse return parser except Exception as e: errorlog.warning('There was a problem loading the table file: %r', e) except VersionError as e: errorlog.warning(str(e)) except ImportError: pass if debuglog is None: if debug: try: debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) except IOError as e: errorlog.warning("Couldn't open %r. 
%s" % (debugfile, e)) debuglog = NullLogger() else: debuglog = NullLogger() debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) errors = False # Validate the parser information if pinfo.validate_all(): raise YaccError('Unable to build parser') if not pinfo.error_func: errorlog.warning('no p_error() function is defined') # Create a grammar object grammar = Grammar(pinfo.tokens) # Set precedence level for terminals for term, assoc, level in pinfo.preclist: try: grammar.set_precedence(term, assoc, level) except GrammarError as e: errorlog.warning('%s', e) # Add productions to the grammar for funcname, gram in pinfo.grammar: file, line, prodname, syms = gram try: grammar.add_production(prodname, syms, funcname, file, line) except GrammarError as e: errorlog.error('%s', e) errors = True # Set the grammar start symbols try: if start is None: grammar.set_start(pinfo.start) else: grammar.set_start(start) except GrammarError as e: errorlog.error(str(e)) errors = True if errors: raise YaccError('Unable to build parser') # Verify the grammar structure undefined_symbols = grammar.undefined_symbols() for sym, prod in undefined_symbols: errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) errors = True unused_terminals = grammar.unused_terminals() if unused_terminals: debuglog.info('') debuglog.info('Unused terminals:') debuglog.info('') for term in unused_terminals: errorlog.warning('Token %r defined, but not used', term) debuglog.info(' %s', term) # Print out all productions to the debug log if debug: debuglog.info('') debuglog.info('Grammar') debuglog.info('') for n, p in enumerate(grammar.Productions): debuglog.info('Rule %-5d %s', n, p) # Find unused non-terminals unused_rules = grammar.unused_rules() for prod in unused_rules: errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) if len(unused_terminals) == 1: errorlog.warning('There is 1 unused token') if len(unused_terminals) > 1: errorlog.warning('There are %d unused tokens', len(unused_terminals)) if len(unused_rules) == 1: errorlog.warning('There is 1 unused rule') if len(unused_rules) > 1: errorlog.warning('There are %d unused rules', len(unused_rules)) if debug: debuglog.info('') debuglog.info('Terminals, with rules where they appear') debuglog.info('') terms = list(grammar.Terminals) terms.sort() for term in terms: debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) debuglog.info('') debuglog.info('Nonterminals, with rules where they appear') debuglog.info('') nonterms = list(grammar.Nonterminals) nonterms.sort() for nonterm in nonterms: debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) debuglog.info('') if check_recursion: unreachable = grammar.find_unreachable() for u in unreachable: errorlog.warning('Symbol %r is unreachable', u) infinite = grammar.infinite_cycles() for inf in infinite: errorlog.error('Infinite recursion detected for symbol %r', inf) errors = True unused_prec = grammar.unused_precedence() for term, assoc in unused_prec: errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) errors = True if errors: raise YaccError('Unable to build parser') # Run the LRGeneratedTable on the grammar if debug: errorlog.debug('Generating %s tables', method) lr = LRGeneratedTable(grammar, method, debuglog) if debug: num_sr = len(lr.sr_conflicts) # Report shift/reduce and reduce/reduce conflicts if num_sr == 1: 
errorlog.warning('1 shift/reduce conflict') elif num_sr > 1: errorlog.warning('%d shift/reduce conflicts', num_sr) num_rr = len(lr.rr_conflicts) if num_rr == 1: errorlog.warning('1 reduce/reduce conflict') elif num_rr > 1: errorlog.warning('%d reduce/reduce conflicts', num_rr) # Write out conflicts to the output file if debug and (lr.sr_conflicts or lr.rr_conflicts): debuglog.warning('') debuglog.warning('Conflicts:') debuglog.warning('') for state, tok, resolution in lr.sr_conflicts: debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) already_reported = set() for state, rule, rejected in lr.rr_conflicts: if (state, id(rule), id(rejected)) in already_reported: continue debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) debuglog.warning('rejected rule (%s) in state %d', rejected, state) errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) errorlog.warning('rejected rule (%s) in state %d', rejected, state) already_reported.add((state, id(rule), id(rejected))) warned_never = [] for state, rule, rejected in lr.rr_conflicts: if not rejected.reduced and (rejected not in warned_never): debuglog.warning('Rule (%s) is never reduced', rejected) errorlog.warning('Rule (%s) is never reduced', rejected) warned_never.append(rejected) # Write the table file if requested if write_tables: try: lr.write_table(tabmodule, outputdir, signature) except IOError as e: errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) # Write a pickled version of the tables if picklefile: try: lr.pickle_table(picklefile, signature) except IOError as e: errorlog.warning("Couldn't create %r. %s" % (picklefile, e)) # Build the parser lr.bind_callables(pinfo.pdict) parser = LRParser(lr, pinfo.error_func) parse = parser.parse return parser
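# -----------------------------------------------------------------------------
# Usage sketch (editorial addition, commented out so module behaviour is
# unchanged). A minimal grammar showing how the yacc() entry point above is
# normally driven. It assumes this module is importable as ply.yacc and that a
# companion lexer module built with ply.lex supplies the token definitions;
# the names calclex, NUMBER, PLUS, MINUS, TIMES, DIVIDE and the p_* rules are
# illustrative only.
#
#     import ply.yacc as yacc
#     from calclex import tokens           # hypothetical ply.lex tokenizer module
#
#     precedence = (
#         ('left', 'PLUS', 'MINUS'),
#         ('left', 'TIMES', 'DIVIDE'),
#     )
#
#     def p_expression_binop(p):
#         '''expression : expression PLUS expression
#                       | expression MINUS expression
#                       | expression TIMES expression
#                       | expression DIVIDE expression'''
#         if p[2] == '+':
#             p[0] = p[1] + p[3]
#         elif p[2] == '-':
#             p[0] = p[1] - p[3]
#         elif p[2] == '*':
#             p[0] = p[1] * p[3]
#         else:
#             p[0] = p[1] / p[3]
#
#     def p_expression_number(p):
#         'expression : NUMBER'
#         p[0] = p[1]
#
#     def p_error(p):
#         print('Syntax error at %r' % (p,))
#
#     parser = yacc.yacc()                  # builds (or reloads) the LALR tables
#     print(parser.parse('2 + 3 * 4'))      # -> 14, given a suitable lexer
# -----------------------------------------------------------------------------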
30a92c9cde344de84f86055fc422618e3fc18cbf78ddaa6b78004a633f9b9746
# ---------------------------------------------------------------------- # ctokens.py # # Token specifications for symbols in ANSI C and C++. This file is # meant to be used as a library in other tokenizers. # ---------------------------------------------------------------------- # Reserved words tokens = [ # Literals (identifier, integer constant, float constant, string constant, char const) 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER', # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=) 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=) 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', # Increment/decrement (++,--) 'INCREMENT', 'DECREMENT', # Structure dereference (->) 'ARROW', # Ternary operator (?) 'TERNARY', # Delimeters ( ) [ ] { } , . ; : 'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET', 'LBRACE', 'RBRACE', 'COMMA', 'PERIOD', 'SEMI', 'COLON', # Ellipsis (...) 'ELLIPSIS', ] # Operators t_PLUS = r'\+' t_MINUS = r'-' t_TIMES = r'\*' t_DIVIDE = r'/' t_MODULO = r'%' t_OR = r'\|' t_AND = r'&' t_NOT = r'~' t_XOR = r'\^' t_LSHIFT = r'<<' t_RSHIFT = r'>>' t_LOR = r'\|\|' t_LAND = r'&&' t_LNOT = r'!' t_LT = r'<' t_GT = r'>' t_LE = r'<=' t_GE = r'>=' t_EQ = r'==' t_NE = r'!=' # Assignment operators t_EQUALS = r'=' t_TIMESEQUAL = r'\*=' t_DIVEQUAL = r'/=' t_MODEQUAL = r'%=' t_PLUSEQUAL = r'\+=' t_MINUSEQUAL = r'-=' t_LSHIFTEQUAL = r'<<=' t_RSHIFTEQUAL = r'>>=' t_ANDEQUAL = r'&=' t_OREQUAL = r'\|=' t_XOREQUAL = r'\^=' # Increment/decrement t_INCREMENT = r'\+\+' t_DECREMENT = r'--' # -> t_ARROW = r'->' # ? t_TERNARY = r'\?' # Delimeters t_LPAREN = r'\(' t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' t_LBRACE = r'\{' t_RBRACE = r'\}' t_COMMA = r',' t_PERIOD = r'\.' t_SEMI = r';' t_COLON = r':' t_ELLIPSIS = r'\.\.\.' # Identifiers t_ID = r'[A-Za-z_][A-Za-z0-9_]*' # Integer literal t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?' # Floating literal t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' # String literal t_STRING = r'\"([^\\\n]|(\\.))*?\"' # Character constant 'c' or L'c' t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\'' # Comment (C-Style) def t_COMMENT(t): r'/\*(.|\n)*?\*/' t.lexer.lineno += t.value.count('\n') return t # Comment (C++-Style) def t_CPPCOMMENT(t): r'//.*\n' t.lexer.lineno += 1 return t
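# ----------------------------------------------------------------------
# Hedged usage sketch (not part of ctokens.py): the file above is meant
# to be imported into other tokenizers, roughly like this.  The module
# name "ctokens" in the import is an assumption about where the file
# lives; adjust it to the actual package path.  Only lex.lex() and the
# t_* naming convention are relied on.

import ply.lex as lex
from ctokens import *          # tokens list plus the shared t_* rules

# Rules the shared library deliberately leaves to the caller.
t_ignore = ' \t'

def t_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += len(t.value)

def t_error(t):
    print('Illegal character %r' % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('x = a + 42;  /* done */')
for tok in lexer:
    print(tok.type, tok.value)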
03a85d259563237b7f81e79b67d07352fc11ac85e8d257f0cd094cd8b70ac9ab
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <[email protected]>" __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", 
"tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes 
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects 
in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = 
operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) 
if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
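# ----------------------------------------------------------------------
# Hedged usage sketch (not part of six itself): typical ways the helpers
# defined above are consumed by code that must run on both Python 2 and
# Python 3.  The class and dictionary below are invented for the
# example, and "import six" assumes the standalone distribution; a
# vendored copy would use its package-specific import path instead.

import six
from six.moves import range      # xrange on Python 2, range on Python 3

@six.python_2_unicode_compatible
class Greeting(object):
    def __str__(self):           # return text; six adds __unicode__ on Python 2
        return u'hello'

def describe(mapping):
    # iteritems() dispatches to dict.iteritems() / dict.items() as needed
    for key, value in six.iteritems(mapping):
        if isinstance(value, six.string_types):
            six.print_(key, '->', value)

describe({'lang': 'python', 'answer': 42})
print(Greeting())                # 'hello' on both major versions
print(list(range(3)))            # [0, 1, 2]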
1bd374b063b8b240a449761811b14603f1d6a2457c8fd50403e300275cc56d1c
# configobj.py # A config file reader/writer that supports nested sections in config files. # Copyright (C) 2005-2014: # (name) : (email) # Michael Foord: fuzzyman AT voidspace DOT org DOT uk # Nicola Larosa: nico AT tekNico DOT net # Rob Dennis: rdennis AT gmail DOT com # Eli Courtwright: eli AT courtwright DOT org # This software is licensed under the terms of the BSD license. # http://opensource.org/licenses/BSD-3-Clause # ConfigObj 5 - main repository for documentation and issue tracking: # https://github.com/DiffSK/configobj import os import re import sys import collections from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE # imported lazily to avoid startup performance hit if it isn't used compiler = None # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" # Sentinel for use in getattr calls to replace hasattr MISSING = object() __all__ = ( 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', 'flatten_errors', 'get_extra_values' ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } # this could be replaced if six is used for compatibility, or there are no # more assertions about items being a string def getObj(s): global compiler if compiler is None: import compiler s = "a=" + s p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class UnknownType(Exception): pass class Builder(object): def build(self, o): if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return list(map(self.build, o.getChildren())) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = next(i) return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise 
UnknownType('Undefined Name') def build_Add(self, o): real, imag = list(map(self.build_Const, o.getChildren())) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s # this is supposed to be safe import ast return ast.literal_eval(s) class ConfigObjError(SyntaxError): """ This is the base class for all errors that ConfigObj raises. It is a subclass of SyntaxError. """ def __init__(self, message='', line_number=None, line=''): self.line = line self.line_number = line_number SyntaxError.__init__(self, message) class NestingError(ConfigObjError): """ This error indicates a level of nesting that doesn't match. """ class ParseError(ConfigObjError): """ This error indicates that a line is badly written. It is neither a valid ``key = value`` line, nor a valid section marker line. """ class ReloadError(IOError): """ A 'reload' operation failed. This exception is a subclass of ``IOError``. """ def __init__(self): IOError.__init__(self, 'reload failed, filename is not set.') class DuplicateError(ConfigObjError): """ The keyword or section specified already exists. """ class ConfigspecError(ConfigObjError): """ An error occured whilst parsing a configspec. """ class InterpolationError(ConfigObjError): """Base class for the two interpolation errors.""" class InterpolationLoopError(InterpolationError): """Maximum interpolation depth exceeded in string interpolation.""" def __init__(self, option): InterpolationError.__init__( self, 'interpolation loop detected in value "%s".' % option) class RepeatSectionError(ConfigObjError): """ This error indicates additional sections in a section with a ``__many__`` (repeated) section. """ class MissingInterpolationOption(InterpolationError): """A value specified for interpolation was missing.""" def __init__(self, option): msg = 'missing option "%s" in interpolation.' % option InterpolationError.__init__(self, msg) class UnreprError(ConfigObjError): """An error parsing in unrepr mode.""" class InterpolationEngine(object): """ A helper class to help perform string interpolation. This class is an abstract base class; its descendants perform the actual work. """ # compiled regexp to use in self.interpolate() _KEYCRE = re.compile(r"%\(([^)]*)\)s") _cookie = '%' def __init__(self, section): # the Section instance that "owns" this engine self.section = section def interpolate(self, key, value): # short-cut if not self._cookie in value: return value def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. ``value``: the string we're trying to interpolate. ``section``: the section in which that string was found ``backtrail``: a dict to keep track of where we've been, to detect and prevent infinite recursion loops This is similar to a depth-first-search algorithm. """ # Have we been here already? 
if (key, section.name) in backtrail: # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None and not isinstance(val, Section): break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None and not isinstance(val, Section): break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). 
""" raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _cookie = '%' _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _cookie = '$' _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P<escaped>\$) | # Two $ signs (?P<named>[_a-z][_a-z0-9]*) | # $name format {(?P<braced>[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } def __newobj__(cls, *args): # Hack for pickle return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. """ def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.items(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # the configspec self.configspec = None # for defaults self.defaults = [] self.default_values = {} self.extra_values = [] self._created = False def _interpolate(self, key, value): try: # do we already have an interpolation engine? 
engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name == True: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use default name = DEFAULT_INTERPOLATION name = name.lower() # so that "Template", "template", etc. all work class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation: if isinstance(val, str): return self._interpolate(key, val) if isinstance(val, list): def _check(entry): if isinstance(entry, str): return self._interpolate(key, entry) return entry new = [_check(entry) for entry in val] if new != val: return new return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, str): raise ValueError('The key "%s" is not a string.' % key) # add the comment if key not in self.comments: self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if key not in self: self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, collections.Mapping) and not unrepr: # First create the new depth level, # then create the section if key not in self: self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if key not in self: self.scalars.append(key) if not self.main.stringify: if isinstance(value, str): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, str): raise TypeError('Value is not a string "%s".' % entry) else: raise TypeError('Value is not a string "%s".' % value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, default=MISSING): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. 
If key is not found, d is returned if given, otherwise KeyError is raised' """ try: val = self[key] except KeyError: if default is MISSING: raise val = default else: del self[key] return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None self.defaults = [] self.extra_values = [] def setdefault(self, key, default=None): """A version of setdefault that sets sequence if appropriate.""" try: return self[key] except KeyError: self[key] = default return self[key] def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return list(zip((self.scalars + self.sections), list(self.values()))) def keys(self): """D.keys() -> list of D's keys""" return (self.scalars + self.sections) def values(self): """D.values() -> list of D's values""" return [self[key] for key in (self.scalars + self.sections)] def iteritems(self): """D.iteritems() -> an iterator over the (key, value) items of D""" return iter(list(self.items())) def iterkeys(self): """D.iterkeys() -> an iterator over the keys of D""" return iter((self.scalars + self.sections)) __iter__ = iterkeys def itervalues(self): """D.itervalues() -> an iterator over the values of D""" return iter(list(self.values())) def __repr__(self): """x.__repr__() <==> repr(x)""" def _getval(key): try: return self[key] except MissingInterpolationOption: return dict.__getitem__(self, key) return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) for key in (self.scalars + self.sections)]) __str__ = __repr__ __str__.__doc__ = "x.__str__() <==> str(x)" # Extra methods - not in a normal dictionary def dict(self): """ Return a deepcopy of self as a dictionary. All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. >>> n = a.dict() >>> n == a 1 >>> n is a 0 """ newdict = {} for entry in self: this_entry = self[entry] if isinstance(this_entry, Section): this_entry = this_entry.dict() elif isinstance(this_entry, list): # create a copy rather than a reference this_entry = list(this_entry) elif isinstance(this_entry, tuple): # create a copy rather than a reference this_entry = tuple(this_entry) newdict[entry] = this_entry return newdict def merge(self, indict): """ A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}) """ for key, val in list(indict.items()): if (key in self and isinstance(self[key], collections.Mapping) and isinstance(val, collections.Mapping)): self[key].merge(val) else: self[key] = val def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. 
(used by encode and decode) Also renames comments. """ if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) # val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs): """ Walk every member and call a function on the keyword and value. Return a dictionary of the return values If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. .. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}) """ out = {} # scalars first for i in range(len(self.scalars)): entry = self.scalars[i] try: val = function(self, entry, **keywargs) # bound again in case name has changed entry = self.scalars[i] out[entry] = val except Exception: if raise_errors: raise else: entry = self.scalars[i] out[entry] = False # then sections for i in range(len(self.sections)): entry = self.sections[i] if call_on_sections: try: function(self, entry, **keywargs) except Exception: if raise_errors: raise else: entry = self.sections[i] out[entry] = False # bound again in case name has changed entry = self.sections[i] # previous result is discarded out[entry] = self[entry].walk( function, raise_errors=raise_errors, call_on_sections=call_on_sections, **keywargs) return out def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. 
>>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, str): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val) def as_int(self, key): """ A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2' """ return int(self[key]) def as_float(self, key): """ A convenience method which coerces the specified value to a float. If the value is an invalid literal for ``float``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: invalid literal for float(): fish >>> a['b'] = '1' >>> a.as_float('b') 1.0 >>> a['b'] = '3.2' >>> a.as_float('b') #doctest: +ELLIPSIS 3.2... """ return float(self[key]) def as_list(self, key): """ A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1] """ result = self[key] if isinstance(result, (tuple, list)): return list(result) return [result] def restore_default(self, key): """ Restore (and return) default value for the specified key. This method will only work for a ConfigObj that was created with a configspec and has been validated. If there is no default value for this key, ``KeyError`` is raised. """ default = self.default_values[key] dict.__setitem__(self, key, default) if key not in self.defaults: self.defaults.append(key) return default def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults() class ConfigObj(Section): """An object to read, create, and write config files.""" _keyword = re.compile(r'''^ # line start (\s*) # indentation ( # keyword (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"=].*?) # no quotes ) \s*=\s* # divider (.*) # value (including list values and comments) $ # line end ''', re.VERBOSE) _sectionmarker = re.compile(r'''^ (\s*) # 1: indentation ((?:\[\s*)+) # 2: section marker open ( # 3: section name open (?:"\s*\S.*?\s*")| # at least one non-space with double quotes (?:'\s*\S.*?\s*')| # at least one non-space with single quotes (?:[^'"\s].*?) # at least one non-space unquoted ) # section name close ((?:\s*\])+) # 4: section marker close \s*(\#.*)? 
# 5: optional comment $''', re.VERBOSE) # this regexp pulls list values out as a single string # or single values and comments # FIXME: this regex adds a '' to the end of comma terminated lists # workaround in ``_handle_value`` _valueexp = re.compile(r'''^ (?: (?: ( (?: (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#][^,\#]*?) # unquoted ) \s*,\s* # comma )* # match all list items ending in a comma (if any) ) ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#\s][^,]*?)| # unquoted (?:(?<!,)) # Empty value )? # last item in a list - or string value )| (,) # alternatively a single comma - empty list ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # use findall to get the members of a list value _listvalueexp = re.compile(r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#]?.*?) # unquoted ) \s*,\s* # comma ''', re.VERBOSE) # this regexp is used for the value # when lists are switched off _nolistvalue = re.compile(r'''^ ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"\#].*?)| # unquoted (?:) # Empty value ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # regexes for finding triple quoted values on one line _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$") _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$') _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$") _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$') _triple_quote = { "'''": (_single_line_single, _multi_line_single), '"""': (_single_line_double, _multi_line_double), } # Used by the ``istrue`` Section method _bools = { 'yes': True, 'no': False, 'on': True, 'off': False, '1': True, '0': False, 'true': True, 'false': False, } def __init__(self, infile=None, options=None, configspec=None, encoding=None, interpolation=True, raise_errors=False, list_values=True, create_empty=False, file_error=False, stringify=True, indent_type=None, default_encoding=None, unrepr=False, write_empty_values=False, _inspec=False): """ Parse a config file or create a config file object. ``ConfigObj(infile=None, configspec=None, encoding=None, interpolation=True, raise_errors=False, list_values=True, create_empty=False, file_error=False, stringify=True, indent_type=None, default_encoding=None, unrepr=False, write_empty_values=False, _inspec=False)`` """ self._inspec = _inspec # init the superclass Section.__init__(self, self, 0, self) infile = infile or [] _options = {'configspec': configspec, 'encoding': encoding, 'interpolation': interpolation, 'raise_errors': raise_errors, 'list_values': list_values, 'create_empty': create_empty, 'file_error': file_error, 'stringify': stringify, 'indent_type': indent_type, 'default_encoding': default_encoding, 'unrepr': unrepr, 'write_empty_values': write_empty_values} if options is None: options = _options else: import warnings warnings.warn('Passing in an options dictionary to ConfigObj() is ' 'deprecated. Use **options instead.', DeprecationWarning) # TODO: check the values too. for entry in options: if entry not in OPTION_DEFAULTS: raise TypeError('Unrecognised option "%s".' % entry) for entry, value in list(OPTION_DEFAULTS.items()): if entry not in options: options[entry] = value keyword_value = _options[entry] if value != keyword_value: options[entry] = keyword_value # XXXX this ignores an explicit list_values = True in combination # with _inspec. The user should *never* do that anyway, but still... 
if _inspec: options['list_values'] = False self._initialise(options) configspec = options['configspec'] self._original_configspec = configspec self._load(infile, configspec) def _load(self, infile, configspec): if isinstance(infile, str): self.filename = infile if os.path.isfile(infile): with open(infile, 'rb') as h: content = h.readlines() or [] elif self.file_error: # raise an error if the file doesn't exist raise IOError('Config file not found: "%s".' % self.filename) else: # file doesn't already exist if self.create_empty: # this is a good test that the filename specified # isn't impossible - like on a non-existent device with open(infile, 'w') as h: h.write('') content = [] elif isinstance(infile, (list, tuple)): content = list(infile) elif isinstance(infile, dict): # initialise self # the Section class handles creating subsections if isinstance(infile, ConfigObj): # get a copy of our ConfigObj def set_section(in_section, this_section): for entry in in_section.scalars: this_section[entry] = in_section[entry] for section in in_section.sections: this_section[section] = {} set_section(in_section[section], this_section[section]) set_section(infile, self) else: for entry in infile: self[entry] = infile[entry] del self._errors if configspec is not None: self._handle_configspec(configspec) else: self.configspec = None return elif getattr(infile, 'read', MISSING) is not MISSING: # This supports file like objects content = infile.read() or [] # needs splitting into lines - but needs doing *after* decoding # in case it's not an 8 bit encoding else: raise TypeError('infile must be a filename, file like object, or list of lines.') if content: # don't do it for the empty ConfigObj content = self._handle_bom(content) # infile is now *always* a list # # Set the newlines attribute (first line ending it finds) # and strip trailing '\n' or '\r' from lines for line in content: if (not line) or (line[-1] not in ('\r', '\n')): continue for end in ('\r\n', '\n', '\r'): if line.endswith(end): self.newlines = end break break assert all(isinstance(line, str) for line in content), repr(content) content = [line.rstrip('\r\n') for line in content] self._parse(content) # if we had any errors, now is the time to raise them if self._errors: info = "at line %s." 
% self._errors[0].line_number if len(self._errors) > 1: msg = "Parsing failed with several errors.\nFirst error %s" % info error = ConfigObjError(msg) else: error = self._errors[0] # set the errors attribute; it's a list of tuples: # (error_type, message, line_number) error.errors = self._errors # set the config attribute error.config = self raise error # delete private attributes del self._errors if configspec is None: self.configspec = None else: self._handle_configspec(configspec) def _initialise(self, options=None): if options is None: options = OPTION_DEFAULTS # initialise a few variables self.filename = None self._errors = [] self.raise_errors = options['raise_errors'] self.interpolation = options['interpolation'] self.list_values = options['list_values'] self.create_empty = options['create_empty'] self.file_error = options['file_error'] self.stringify = options['stringify'] self.indent_type = options['indent_type'] self.encoding = options['encoding'] self.default_encoding = options['default_encoding'] self.BOM = False self.newlines = None self.write_empty_values = options['write_empty_values'] self.unrepr = options['unrepr'] self.initial_comment = [] self.final_comment = [] self.configspec = None if self._inspec: self.list_values = False # Clear section attributes as well Section._initialise(self) def __repr__(self): def _getval(key): try: return self[key] except MissingInterpolationOption: return dict.__getitem__(self, key) return ('%s({%s})' % (self.__class__.__name__, ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) for key in (self.scalars + self.sections)]))) def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string. """ if ((self.encoding is not None) and (self.encoding.lower() not in BOM_LIST)): # No need to check for a BOM # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) if isinstance(infile, (list, tuple)): line = infile[0] else: line = infile if isinstance(line, str): # it's already decoded and there's no need to do anything # else, just use the _decode utility method to handle # listifying appropriately return self._decode(infile, self.encoding) if self.encoding is not None: # encoding explicitly supplied # And it could have an associated BOM # TODO: if encoding is just UTF16 - we ought to check for both # TODO: big endian and little endian versions. 
enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in list(BOMS.items()): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): ### BOM discovered ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in list(BOMS.items()): if not isinstance(line, bytes) or not line.startswith(BOM): # didn't specify a BOM, or it's not a bytestring continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF-8 if isinstance(infile, str): return infile.splitlines(True) elif isinstance(infile, bytes): return infile.decode('utf-8').splitlines(True) else: return self._decode(infile, 'utf-8') # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no encoding specified, default to UTF-8 if isinstance(infile, bytes): return infile.decode('utf-8').splitlines(True) else: return self._decode(infile, 'utf-8') def _a_to_u(self, aString): """Decode ASCII strings to unicode if a self.encoding is specified.""" if isinstance(aString, bytes) and self.encoding: return aString.decode(self.encoding) else: return aString def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. if is a string, it also needs converting to a list. """ if isinstance(infile, str): return infile.splitlines(True) if isinstance(infile, bytes): # NOTE: Could raise a ``UnicodeDecodeError`` if encoding: return infile.decode(encoding).splitlines(True) else: return infile.splitlines(True) if encoding: for i, line in enumerate(infile): if isinstance(line, bytes): # NOTE: The isinstance test here handles mixed lists of unicode/string # NOTE: But the decode will break on any non-string values # NOTE: Or could raise a ``UnicodeDecodeError`` infile[i] = line.decode(encoding) return infile def _decode_element(self, line): """Decode element to unicode if necessary.""" if isinstance(line, bytes) and self.default_encoding: return line.decode(self.default_encoding) else: return line # TODO: this may need to be modified def _str(self, value): """ Used by ``stringify`` within validate, to turn non-string values into strings. """ if not isinstance(value, str): # intentially 'str' because it's just whatever the "normal" # string type is for the python version we're dealing with return str(value) else: return value def _parse(self, infile): """Actually parse the config file.""" temp_list_values = self.list_values if self.unrepr: self.list_values = False comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False while cur_index < maxline: if reset_comment: comment_list = [] cur_index += 1 line = infile[cur_index] sline = line.strip() # do we have anything on the line ? 
if not sline or sline.startswith('#'): reset_comment = False comment_list.append(line) continue if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) if mat is not None: # is a section line (indent, sect_open, sect_name, sect_close, comment) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): self._handle_error("Cannot compute the section depth", NestingError, infile, cur_index) continue if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: self._handle_error("Cannot compute nesting level", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: # the new section is a sibling of the current section parent = this_section.parent elif cur_depth == this_section.depth + 1: # the new section is a child the current section parent = this_section else: self._handle_error("Section too nested", NestingError, infile, cur_index) continue sect_name = self._unquote(sect_name) if sect_name in parent: self._handle_error('Duplicate section name', DuplicateError, infile, cur_index) continue # create the new section this_section = Section( parent, cur_depth, self, name=sect_name) parent[sect_name] = this_section parent.inline_comments[sect_name] = comment parent.comments[sect_name] = comment_list continue # # it's not a section marker, # so it should be a valid ``key = value`` line mat = self._keyword.match(line) if mat is None: self._handle_error( 'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line), ParseError, infile, cur_index) else: # is a keyword value # value will include any inline comment (indent, key, value) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent # check for a multiline value if value[:3] in ['"""', "'''"]: try: value, comment, cur_index = self._multiline( value, infile, cur_index, maxline) except SyntaxError: self._handle_error( 'Parse error in multiline value', ParseError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception as e: if type(e) == UnknownType: msg = 'Unknown name or type in value' else: msg = 'Parse error from unrepr-ing multiline value' self._handle_error(msg, UnreprError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception as e: if isinstance(e, UnknownType): msg = 'Unknown name or type in value' else: msg = 'Parse error from unrepr-ing value' self._handle_error(msg, UnreprError, infile, cur_index) continue else: # extract comment and lists try: (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( 'Parse error in value', ParseError, infile, cur_index) continue # key = self._unquote(key) if key in this_section: self._handle_error( 'Duplicate keyword name', DuplicateError, infile, cur_index) continue # add the key. 
# we set unrepr because if we have got this far we will never # be creating a new section this_section.__setitem__(key, value, unrepr=True) this_section.inline_comments[key] = comment this_section.comments[key] = comment_list continue # if self.indent_type is None: # no indentation used, set the type accordingly self.indent_type = '' # preserve the final comment if not self and not self.initial_comment: self.initial_comment = comment_list elif not reset_comment: self.final_comment = comment_list self.list_values = temp_list_values def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError() def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ line = infile[cur_index] cur_index += 1 message = '{0} at line {1}.'.format(text, cur_index) error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here raise error # store the error # reraise when parsing has finished self._errors.append(error) def _unquote(self, value): """Return an unquoted version of a value""" if not value: # should only happen during parsing of lists raise SyntaxError if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. """ if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, str): if self.stringify: # intentially 'str' because it's just whatever the "normal" # string type is for the python version we're dealing with value = str(value) else: raise TypeError('Value "%s" is not a string.' 
% value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) if value.find('"""') == -1: quot = tdquot else: quot = tsquot return quot def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? 
if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment) def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) == -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index) def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. ``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, _inspec=True) except ConfigObjError as e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError as e: raise IOError('Reading configspec failed: %s' % e) self.configspec = configspec def _set_configspec(self, section, copy): """ Called by validate. 
Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} section[entry]._created = True if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry] def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" # NOTE: the calls to self._quote here handles non-StringType values. if not self.unrepr: val = self._decode_element(self._quote(this_entry)) else: val = repr(this_entry) return '%s%s%s%s%s' % (indent_string, self._decode_element(self._quote(entry, multiline=False)), self._a_to_u(' = '), val, self._decode_element(comment)) def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment)) def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment) # Public methods def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() >>> a.filename = filename >>> a == ConfigObj('test.ini', raise_errors=True) 1 >>> import os >>> os.remove('test.ini') """ if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') if section is None: int_val = self.interpolation self.interpolation = False section = self for line in self.initial_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: # don't write out default values continue for comment_line in section.comments[entry]: comment_line = self._decode_element(comment_line.lstrip()) if comment_line and not comment_line.startswith(cs): comment_line = csp + comment_line out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) if isinstance(this_entry, Section): # a section out.append(self._write_marker( indent_string, this_entry.depth, entry, comment)) out.extend(self.write(section=this_entry)) else: out.append(self._write_line( indent_string, entry, this_entry, comment)) if section is self: for line in self.final_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) self.interpolation = int_val if section is not self: return out if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode # 
NOTE: This will *screw* UTF16, each line will start with the BOM if self.encoding: out = [l.encode(self.encoding) for l in out] if (self.BOM and ((self.encoding is None) or (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): # Add the UTF8 BOM if not out: out.append('') out[0] = BOM_UTF8 + out[0] return out # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w' and sys.platform == 'win32' and newline == '\r\n'): # Windows specific hack to avoid writing '\r\r\n' newline = '\n' output = self._a_to_u(newline).join(out) if not output.endswith(newline): output += newline if isinstance(output, bytes): output_bytes = output else: output_bytes = output.encode(self.encoding or self.default_encoding or 'ascii') if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): # Add the UTF8 BOM output_bytes = BOM_UTF8 + output_bytes if outfile is not None: outfile.write(output_bytes) else: with open(self.filename, 'wb') as h: h.write(output_bytes) def validate(self, validator, preserve_errors=False, copy=False, section=None): """ Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. """ if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the validate module # Which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self if copy: section.initial_comment = section.configspec.initial_comment section.final_comment = section.configspec.final_comment section.encoding = section.configspec.encoding section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type # # section.default_values.clear() #?? configspec = section.configspec self._set_configspec(section, copy) def validate_entry(entry, spec, val, missing, ret_true, ret_false): section.default_values.pop(entry, None) try: section.default_values[entry] = validator.get_default_value(configspec[entry]) except (KeyError, AttributeError, validator.baseErrorClass): # No default, bad default or validator has no 'get_default_value' # (e.g. 
SimpleVal) pass try: check = validator.check(spec, val, missing=missing ) except validator.baseErrorClass as e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false # out = {} ret_true = True ret_false = True unvalidated = [k for k in section.scalars if k not in configspec] incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names continue if (not entry in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and entry not in section.scalars: # copy comments section.comments[entry] = ( configspec.comments.get(entry, [])) section.inline_comments[entry] = ( configspec.inline_comments.get(entry, '')) # else: missing = False val = section[entry] ret_true, ret_false = validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] if many is not None: for entry in unvalidated: val = section[entry] ret_true, ret_false = validate_entry(entry, many, val, False, ret_true, ret_false) unvalidated = [] for entry in incorrect_scalars: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Value %r was provided as a section' % entry out[entry] = validator.baseErrorClass(msg) for entry in incorrect_sections: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Section %r was provided as a single value' % entry out[entry] = validator.baseErrorClass(msg) # Missing sections will have been created as empty ones when the # configspec was read. for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if section[entry].configspec is None: unvalidated.append(entry) continue if copy: section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if check == False: ret_true = False elif check == True: ret_false = False else: ret_true = False section.extra_values = unvalidated if preserve_errors and not section._created: # If the section wasn't created (i.e. it wasn't missing) # then we can't return False, we need to preserve errors ret_false = False # if ret_false and preserve_errors and out: # If we are preserving errors, but all # the failures are from missing sections / values # then we can return False. Otherwise there is a # real failure that we need to preserve. 
ret_false = not any(out.values()) if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, str): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. """ if levels is None: # first time called levels = [] results = [] if res == True: return sorted(results) if res == False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return sorted(results) for (key, val) in list(res.items()): if val == True: continue if isinstance(cfg.get(key), collections.Mapping): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return sorted(results) def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. 
``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] out.extend([(_prepend, name) for name in conf.extra_values]) for name in conf.sections: if name not in conf.extra_values: out.extend(get_extra_values(conf[name], _prepend + (name,))) return out """*A programming language is a medium of expression.* - Paul Graham"""
c78413a99704e1dd71d4a39b83f45b355e92a11facbbf314cc916a68b8df1c6f
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os from distutils.version import LooseVersion from ..mpl_normalize import simple_norm from ... import log from ...io.fits import getdata def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None, max_cut=None, min_percent=None, max_percent=None, percent=None, cmap='Greys_r'): """ Create a bitmap file from a FITS image, applying a stretching transform between minimum and maximum cut levels and a matplotlib colormap. Parameters ---------- filename : str The filename of the FITS file. ext : int FITS extension name or number of the image to convert. The default is 0. out_fn : str The filename of the output bitmap image. The type of bitmap is determined by the filename extension (e.g. '.jpg', '.png'). The default is a PNG file with the same name as the FITS file. stretch : {{'linear', 'sqrt', 'power', log', 'asinh'}} The stretching function to apply to the image. The default is 'linear'. power : float, optional The power index for ``stretch='power'``. The default is 1.0. asinh_a : float, optional For ``stretch='asinh'``, the value where the asinh curve transitions from linear to logarithmic behavior, expressed as a fraction of the normalized image. Must be in the range between 0 and 1. The default is 0.1. min_cut : float, optional The pixel value of the minimum cut level. Data values less than ``min_cut`` will set to ``min_cut`` before stretching the image. The default is the image minimum. ``min_cut`` overrides ``min_percent``. max_cut : float, optional The pixel value of the maximum cut level. Data values greater than ``min_cut`` will set to ``min_cut`` before stretching the image. The default is the image maximum. ``max_cut`` overrides ``max_percent``. min_percent : float, optional The percentile value used to determine the pixel value of minimum cut level. The default is 0.0. ``min_percent`` overrides ``percent``. max_percent : float, optional The percentile value used to determine the pixel value of maximum cut level. The default is 100.0. ``max_percent`` overrides ``percent``. percent : float, optional The percentage of the image values used to determine the pixel values of the minimum and maximum cut levels. The lower cut level will set at the ``(100 - percent) / 2`` percentile, while the upper cut level will be set at the ``(100 + percent) / 2`` percentile. The default is 100.0. ``percent`` is ignored if either ``min_percent`` or ``max_percent`` is input. cmap : str The matplotlib color map name. The default is 'Greys_r'. """ import matplotlib import matplotlib.cm as cm import matplotlib.image as mimg # __main__ gives ext as a string try: ext = int(ext) except ValueError: pass try: image = getdata(filename, ext) except Exception as e: log.critical(e) return 1 if image.ndim != 2: log.critical('data in FITS extension {0} is not a 2D array' .format(ext)) if out_fn is None: out_fn = os.path.splitext(filename)[0] if out_fn.endswith('.fits'): out_fn = os.path.splitext(out_fn)[0] out_fn += '.png' # need to explicitly define the output format due to a bug in # matplotlib (<= 2.1), otherwise the format will always be PNG out_format = os.path.splitext(out_fn)[1][1:] # workaround for matplotlib 2.0.0 bug where png images are inverted # (mpl-#7656) if (out_format.lower() == 'png' and LooseVersion(matplotlib.__version__) == LooseVersion('2.0.0')): image = image[::-1] if cmap not in cm.datad: log.critical('{0} is not a valid matplotlib colormap name.' 
.format(cmap)) return 1 norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a, min_cut=min_cut, max_cut=max_cut, min_percent=min_percent, max_percent=max_percent, percent=percent) mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower', format=out_format) log.info('Saved file to {0}.'.format(out_fn)) def main(args=None): import argparse parser = argparse.ArgumentParser( description='Create a bitmap file from a FITS image.') parser.add_argument('-e', '--ext', metavar='hdu', default=0, help='Specify the HDU extension number or name ' '(Default is 0).') parser.add_argument('-o', metavar='filename', type=str, default=None, help='Filename for the output image (Default is a ' 'PNG file with the same name as the FITS file).') parser.add_argument('--stretch', type=str, default='linear', help='Type of image stretching ("linear", "sqrt", ' '"power", "log", or "asinh") (Default is "linear").') parser.add_argument('--power', type=float, default=1.0, help='Power index for "power" stretching (Default is ' '1.0).') parser.add_argument('--asinh_a', type=float, default=0.1, help='The value in normalized image where the asinh ' 'curve transitions from linear to logarithmic ' 'behavior (used only for "asinh" stretch) ' '(Default is 0.1).') parser.add_argument('--min_cut', type=float, default=None, help='The pixel value of the minimum cut level ' '(Default is the image minimum).') parser.add_argument('--max_cut', type=float, default=None, help='The pixel value of the maximum cut level ' '(Default is the image maximum).') parser.add_argument('--min_percent', type=float, default=None, help='The percentile value used to determine the ' 'minimum cut level (Default is 0).') parser.add_argument('--max_percent', type=float, default=None, help='The percentile value used to determine the ' 'maximum cut level (Default is 100).') parser.add_argument('--percent', type=float, default=None, help='The percentage of the image values used to ' 'determine the pixel values of the minimum and ' 'maximum cut levels (Default is 100).') parser.add_argument('--cmap', metavar='colormap_name', type=str, default='Greys_r', help='matplotlib color map name ' '(Default is "Greys_r").') parser.add_argument('filename', nargs='+', help='Path to one or more FITS files to convert') args = parser.parse_args(args) for filename in args.filename: fits2bitmap(filename, ext=args.ext, out_fn=args.o, stretch=args.stretch, min_cut=args.min_cut, max_cut=args.max_cut, min_percent=args.min_percent, max_percent=args.max_percent, percent=args.percent, power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
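# Illustrative usage sketch (the 'image.fits' filename is an assumption made
# for the example). The same conversion can be driven through a
# console-script style invocation::
#
#     fits2bitmap image.fits --stretch sqrt --percent 99 -o image.png
#
# which is equivalent to calling ``main(['image.fits', '--stretch', 'sqrt',
# '--percent', '99', '-o', 'image.png'])``, or directly from Python::
#
#     fits2bitmap('image.fits', stretch='sqrt', percent=99.0, out_fn='image.png')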
7587137f95d64fbdc17c8d550ba23f03d2ba28be06e2420787a5df25cf477afe
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Note: This file incldues code dervived from pywcsgrid2 # # This file contains Matplotlib transformation objects (e.g. from pixel to world # coordinates, but also world-to-world). import abc import numpy as np from matplotlib.path import Path from matplotlib.transforms import Transform from ... import units as u from ...wcs import WCS from ...wcs.utils import wcs_to_celestial_frame from ...coordinates import (SkyCoord, frame_transform_graph, SphericalRepresentation, UnitSphericalRepresentation, BaseCoordinateFrame) class CurvedTransform(Transform, metaclass=abc.ABCMeta): """ Abstract base class for non-affine curved transforms """ input_dims = 2 output_dims = 2 is_separable = False def transform_path(self, path): """ Transform a Matplotlib Path Parameters ---------- path : :class:`~matplotlib.path.Path` The path to transform Returns ------- path : :class:`~matplotlib.path.Path` The resulting path """ return Path(self.transform(path.vertices), path.codes) transform_path_non_affine = transform_path @abc.abstractmethod def transform(self, input): raise NotImplementedError("") @abc.abstractmethod def inverted(self): raise NotImplementedError("") class WCSWorld2PixelTransform(CurvedTransform): """ WCS transformation from world to pixel coordinates """ has_inverse = True def __init__(self, wcs, slice=None): super().__init__() self.wcs = wcs if self.wcs.wcs.naxis > 2: if slice is None: raise ValueError("WCS has more than 2 dimensions, so ``slice`` should be set") elif len(slice) != self.wcs.wcs.naxis: raise ValueError("slice should have as many elements as WCS " "has dimensions (should be {0})".format(self.wcs.wcs.naxis)) else: self.slice = slice self.x_index = slice.index('x') self.y_index = slice.index('y') else: self.slice = None def __eq__(self, other): return (isinstance(other, type(self)) and self.wcs == other.wcs and self.slice == other.slice) @property def input_dims(self): return self.wcs.wcs.naxis def transform(self, world): """ Transform world to pixel coordinates. You should pass in a NxM array where N is the number of points to transform, and M is the number of dimensions in the WCS. This then returns the (x, y) pixel coordinates as a Nx2 array. 
""" if world.shape[1] != self.wcs.wcs.naxis: raise ValueError("Second dimension of input values should match number of WCS coordinates") if world.shape[0] == 0: pixel = np.zeros((0, 2)) else: pixel = self.wcs.wcs_world2pix(world, 1) - 1 if self.slice is None: return pixel else: return pixel[:, (self.x_index, self.y_index)] transform_non_affine = transform def inverted(self): """ Return the inverse of the transform """ return WCSPixel2WorldTransform(self.wcs, slice=self.slice) class WCSPixel2WorldTransform(CurvedTransform): """ WCS transformation from pixel to world coordinates """ has_inverse = True def __init__(self, wcs, slice=None): super().__init__() self.wcs = wcs self.slice = slice if self.slice is not None: self.x_index = slice.index('x') self.y_index = slice.index('y') def __eq__(self, other): return (isinstance(other, type(self)) and self.wcs == other.wcs and self.slice == other.slice) @property def output_dims(self): return self.wcs.wcs.naxis def get_coord_slices(self, xmin, xmax, ymin, ymax, nx, ny): """ Get a coordinate slice """ x = np.linspace(xmin, xmax, nx) y = np.linspace(ymin, ymax, ny) Y, X = np.meshgrid(y, x) pixel = np.array([X.ravel(), Y.ravel()]).transpose() world = self.transform(pixel) return X, Y, [world[:, i].reshape(nx, ny).transpose() for i in range(self.wcs.wcs.naxis)] def transform(self, pixel): """ Transform pixel to world coordinates. You should pass in a Nx2 array of (x, y) pixel coordinates to transform to world coordinates. This will then return an NxM array where M is the number of dimensions in the WCS """ if self.slice is None: pixel_full = pixel.copy() else: pixel_full = [] for index in self.slice: if index == 'x': pixel_full.append(pixel[:, 0]) elif index == 'y': pixel_full.append(pixel[:, 1]) else: pixel_full.append(index) pixel_full = np.array(np.broadcast_arrays(*pixel_full)).transpose() pixel_full += 1 if pixel_full.shape[0] == 0: world = np.zeros((0, 2)) else: world = self.wcs.wcs_pix2world(pixel_full, 1) # At the moment, one has to manually check that the transformation # round-trips, otherwise it should be considered invalid. 
pixel_check = self.wcs.wcs_world2pix(world, 1) with np.errstate(invalid='ignore'): invalid = np.any(np.abs(pixel_check - pixel_full) > 1., axis=1) world[invalid] = np.nan return world transform_non_affine = transform def inverted(self): """ Return the inverse of the transform """ return WCSWorld2PixelTransform(self.wcs, slice=self.slice) class CoordinateTransform(CurvedTransform): has_inverse = True def __init__(self, input_system, output_system): super().__init__() self._input_system_name = input_system self._output_system_name = output_system if isinstance(self._input_system_name, WCS): self.input_system = wcs_to_celestial_frame(self._input_system_name) elif isinstance(self._input_system_name, str): self.input_system = frame_transform_graph.lookup_name(self._input_system_name) if self.input_system is None: raise ValueError("Frame {0} not found".format(self._input_system_name)) elif isinstance(self._input_system_name, BaseCoordinateFrame): self.input_system = self._input_system_name else: raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance") if isinstance(self._output_system_name, WCS): self.output_system = wcs_to_celestial_frame(self._output_system_name) elif isinstance(self._output_system_name, str): self.output_system = frame_transform_graph.lookup_name(self._output_system_name) if self.output_system is None: raise ValueError("Frame {0} not found".format(self._output_system_name)) elif isinstance(self._output_system_name, BaseCoordinateFrame): self.output_system = self._output_system_name else: raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance") if self.output_system == self.input_system: self.same_frames = True else: self.same_frames = False @property def same_frames(self): return self._same_frames @same_frames.setter def same_frames(self, same_frames): self._same_frames = same_frames def transform(self, input_coords): """ Transform one set of coordinates to another """ if self.same_frames: return input_coords x_in, y_in = input_coords[:, 0], input_coords[:, 1] c_in = SkyCoord(x_in, y_in, unit=(u.deg, u.deg), frame=self.input_system) # We often need to transform arrays that contain NaN values, and filtering # out the NaN values would have a performance hit, so instead we just pass # on all values and just ignore Numpy warnings with np.errstate(all='ignore'): c_out = c_in.transform_to(self.output_system) if issubclass(c_out.representation, (SphericalRepresentation, UnitSphericalRepresentation)): lon = c_out.data.lon.deg lat = c_out.data.lat.deg else: lon = c_out.spherical.lon.deg lat = c_out.spherical.lat.deg return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1) transform_non_affine = transform def inverted(self): """ Return the inverse of the transform """ return CoordinateTransform(self._output_system_name, self._input_system_name)
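# Illustrative sketch (not executed as part of this module; the WCS parameters
# and the import path are assumptions made for the example). Chaining the two
# WCS transforms defined above should approximately round-trip pixel
# positions, e.g. from an interactive session::
#
#     import numpy as np
#     from astropy.wcs import WCS
#     from astropy.visualization.wcsaxes.transforms import (
#         WCSPixel2WorldTransform)
#
#     wcs = WCS(naxis=2)
#     wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
#     wcs.wcs.crval = [10., 20.]
#     wcs.wcs.crpix = [1., 1.]
#     wcs.wcs.cdelt = [-1e-4, 1e-4]
#
#     pix2world = WCSPixel2WorldTransform(wcs)
#     world2pix = pix2world.inverted()
#     pixels = np.array([[0., 0.], [10., 20.], [100., 50.]])
#     print(world2pix.transform(pix2world.transform(pixels)))  # ~= pixels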
02f54f7aba00dbc1dc92131c012391417692d98c2c643c44954289f0a4cdd514
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This file defines the classes used to represent a 'coordinate', which includes axes, ticks, tick labels, and grid lines. """ import numpy as np from matplotlib.ticker import Formatter from matplotlib.transforms import Affine2D, ScaledTranslation from matplotlib.patches import PathPatch from matplotlib import rcParams from ... import units as u from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator from .ticks import Ticks from .ticklabels import TickLabels from .axislabels import AxisLabels from .grid_paths import get_lon_lat_path, get_gridline_path __all__ = ['CoordinateHelper'] def wrap_angle_at(values, coord_wrap): # On ARM processors, np.mod emits warnings if there are NaN values in the # array, although this doesn't seem to happen on other processors. with np.errstate(invalid='ignore'): return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap) class CoordinateHelper: """ Helper class to control one of the coordinates in the :class:`~astropy.visualization.wcsaxes.WCSAxes`. Parameters ---------- parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes` The axes the coordinate helper belongs to. parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap` The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this coordinate belongs to. transform : `~matplotlib.transforms.Transform` The transform corresponding to this coordinate system. coord_index : int The index of this coordinate in the :class:`~astropy.visualization.wcsaxes.CoordinatesMap`. coord_type : {'longitude', 'latitude', 'scalar'} The type of this coordinate, which is used to determine the wrapping and boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``, latitudes have to be in the range -90 to 90, and scalars are unbounded and do not wrap. coord_unit : `~astropy.units.Unit` The unit that this coordinate is in given the output of transform. coord_wrap : float The angle at which the longitude wraps (defaults to 360) frame : `~astropy.visualization.wcsaxes.frame.BaseFrame` The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`. """ def __init__(self, parent_axes=None, parent_map=None, transform=None, coord_index=None, coord_type='scalar', coord_unit=None, coord_wrap=None, frame=None): # Keep a reference to the parent axes and the transform self.parent_axes = parent_axes self.parent_map = parent_map self.transform = transform self.coord_index = coord_index self.coord_unit = coord_unit self.frame = frame self.set_coord_type(coord_type, coord_wrap) # Initialize ticks self.dpi_transform = Affine2D() self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform) self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform) # Initialize tick labels self.ticklabels = TickLabels(self.frame, transform=None, # display coordinates figure=parent_axes.get_figure()) self.ticks.display_minor_ticks(False) self.minor_frequency = 5 # Initialize axis labels self.axislabels = AxisLabels(self.frame, transform=None, # display coordinates figure=parent_axes.get_figure()) # Initialize container for the grid lines self.grid_lines = [] # Initialize grid style. Take defaults from matplotlib.rcParams. # Based on matplotlib.axis.YTick._get_gridline. # # Matplotlib's gridlines use Line2D, but ours use PathPatch. # Patches take a slightly different format of linestyle argument. 
lines_to_patches_linestyle = {'-': 'solid', '--': 'dashed', '-.': 'dashdot', ':': 'dotted', 'none': 'none', 'None': 'none', ' ': 'none', '': 'none'} self.grid_lines_kwargs = {'visible': False, 'facecolor': 'none', 'edgecolor': rcParams['grid.color'], 'linestyle': lines_to_patches_linestyle[rcParams['grid.linestyle']], 'linewidth': rcParams['grid.linewidth'], 'alpha': rcParams.get('grid.alpha', 1.0), 'transform': self.parent_axes.transData} def grid(self, draw_grid=True, grid_type='lines', **kwargs): """ Plot grid lines for this coordinate. Standard matplotlib appearance options (color, alpha, etc.) can be passed as keyword arguments. Parameters ---------- draw_grid : bool Whether to show the gridlines grid_type : { 'lines' | 'contours' } Whether to plot the contours by determining the grid lines in world coordinates and then plotting them in world coordinates (``'lines'``) or by determining the world coordinates at many positions in the image and then drawing contours (``'contours'``). The first is recommended for 2-d images, while for 3-d (or higher dimensional) cubes, the ``'contours'`` option is recommended. """ if grid_type in ('lines', 'contours'): self._grid_type = grid_type else: raise ValueError("grid_type should be 'lines' or 'contours'") if 'color' in kwargs: kwargs['edgecolor'] = kwargs.pop('color') self.grid_lines_kwargs.update(kwargs) if self.grid_lines_kwargs['visible']: if not draw_grid: self.grid_lines_kwargs['visible'] = False else: self.grid_lines_kwargs['visible'] = True def set_coord_type(self, coord_type, coord_wrap=None): """ Set the coordinate type for the axis. Parameters ---------- coord_type : str One of 'longitude', 'latitude' or 'scalar' coord_wrap : float, optional The value to wrap at for angular coordinates """ self.coord_type = coord_type if coord_type == 'longitude' and coord_wrap is None: self.coord_wrap = 360 elif coord_type != 'longitude' and coord_wrap is not None: raise NotImplementedError('coord_wrap is not yet supported ' 'for non-longitude coordinates') else: self.coord_wrap = coord_wrap # Initialize tick formatter/locator if coord_type == 'scalar': self._coord_unit_scale = None self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit) elif coord_type in ['longitude', 'latitude']: if self.coord_unit is u.deg: self._coord_unit_scale = None else: self._coord_unit_scale = self.coord_unit.to(u.deg) self._formatter_locator = AngleFormatterLocator() else: raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'") def set_major_formatter(self, formatter): """ Set the formatter to use for the major tick labels. Parameters ---------- formatter : str or Formatter The format or formatter to use. """ if isinstance(formatter, Formatter): raise NotImplementedError() # figure out how to swap out formatter elif isinstance(formatter, str): self._formatter_locator.format = formatter else: raise TypeError("formatter should be a string or a Formatter " "instance") def format_coord(self, value): """ Given the value of a coordinate, will format it according to the format of the formatter_locator. 
""" if not hasattr(self, "_fl_spacing"): return "" # _update_ticks has not been called yet fl = self._formatter_locator if isinstance(fl, AngleFormatterLocator): # Convert to degrees if needed if self._coord_unit_scale is not None: value *= self._coord_unit_scale if self.coord_type == 'longitude': value = wrap_angle_at(value, self.coord_wrap) value = value * u.degree value = value.to_value(fl._unit) spacing = self._fl_spacing string = fl.formatter(values=[value] * fl._unit, spacing=spacing) return string[0] def set_separator(self, separator): """ Set the separator to use for the angle major tick labels. Parameters ---------- separator : The separator between numbers in sexagesimal representation. Can be either a string or a tuple. """ if not (self._formatter_locator.__class__ == AngleFormatterLocator): raise TypeError("Separator can only be specified for angle coordinates") if isinstance(separator, str) or isinstance(separator, tuple): self._formatter_locator.sep = separator else: raise TypeError("separator should be a string or a tuple") def set_format_unit(self, unit): """ Set the unit for the major tick labels. Parameters ---------- unit : class:`~astropy.units.Unit` The unit to which the tick labels should be converted to. """ if not issubclass(unit.__class__, u.UnitBase): raise TypeError("unit should be an astropy UnitBase subclass") self._formatter_locator.format_unit = unit def set_ticks(self, values=None, spacing=None, number=None, size=None, width=None, color=None, alpha=None, exclude_overlapping=False): """ Set the location and properties of the ticks. At most one of the options from ``values``, ``spacing``, or ``number`` can be specified. Parameters ---------- values : iterable, optional The coordinate values at which to show the ticks. spacing : float, optional The spacing between ticks. number : float, optional The approximate number of ticks shown. size : float, optional The length of the ticks in points color : str or tuple A valid Matplotlib color for the ticks exclude_overlapping : bool, optional Whether to exclude tick labels that overlap over each other. """ if sum([values is None, spacing is None, number is None]) < 2: raise ValueError("At most one of values, spacing, or number should " "be specified") if values is not None: self._formatter_locator.values = values elif spacing is not None: self._formatter_locator.spacing = spacing elif number is not None: self._formatter_locator.number = number if size is not None: self.ticks.set_ticksize(size) if width is not None: self.ticks.set_linewidth(width) if color is not None: self.ticks.set_color(color) if alpha is not None: self.ticks.set_alpha(alpha) self.ticklabels.set_exclude_overlapping(exclude_overlapping) def set_ticks_position(self, position): """ Set where ticks should appear Parameters ---------- position : str The axes on which the ticks for this coordinate should appear. Should be a string containing zero or more of ``'b'``, ``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be shown on the left and bottom axis. """ self.ticks.set_visible_axes(position) def set_ticks_visible(self, visible): """ Set whether ticks are visible or not. Parameters ---------- visible : bool The visibility of ticks. Setting as ``False`` will hide ticks along this coordinate. """ self.ticks.set_visible(visible) def set_ticklabel(self, **kwargs): """ Set the visual properties for the tick labels. Parameters ---------- kwargs Keyword arguments are passed to :class:`matplotlib.text.Text`. 
These can include keywords to set the ``color``, ``size``, ``weight``, and other text properties. """ self.ticklabels.set(**kwargs) def set_ticklabel_position(self, position): """ Set where tick labels should appear Parameters ---------- position : str The axes on which the tick labels for this coordinate should appear. Should be a string containing zero or more of ``'b'``, ``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the tick labels to be shown on the left and bottom axis. """ self.ticklabels.set_visible_axes(position) def set_ticklabel_visible(self, visible): """ Set whether the tick labels are visible or not. Parameters ---------- visible : bool The visibility of ticks. Setting as ``False`` will hide this coordinate's tick labels. """ self.ticklabels.set_visible(visible) def set_axislabel(self, text, minpad=1, **kwargs): """ Set the text and optionally visual properties for the axis label. Parameters ---------- text : str The axis label text. minpad : float, optional The padding for the label in terms of axis label font size. kwargs Keywords are passed to :class:`matplotlib.text.Text`. These can include keywords to set the ``color``, ``size``, ``weight``, and other text properties. """ self.axislabels.set_text(text) self.axislabels.set_minpad(minpad) self.axislabels.set(**kwargs) def get_axislabel(self): """ Get the text for the axis label Returns ------- label : str The axis label """ return self.axislabels.get_text() def set_axislabel_position(self, position): """ Set where axis labels should appear Parameters ---------- position : str The axes on which the axis label for this coordinate should appear. Should be a string containing zero or more of ``'b'``, ``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the axis label to be shown on the left and bottom axis. """ self.axislabels.set_visible_axes(position) def set_axislabel_visibility_rule(self, rule): """ Set the rule used to determine when the axis label is drawn. Parameters ---------- rule : str If the rule is 'always' axis labels will always be drawn on the axis. If the rule is 'ticks' the label will only be drawn if ticks were drawn on that axis. If the rule is 'labels' the axis label will only be drawn if tick labels were drawn on that axis. """ self.axislabels.set_visibility_rule(rule) def get_axislabel_visibility_rule(self, rule): """ Get the rule used to determine when the axis label is drawn. 
""" return self.axislabels.get_visibility_rule() @property def locator(self): return self._formatter_locator.locator @property def formatter(self): return self._formatter_locator.formatter def _draw_grid(self, renderer): renderer.open_group('grid lines') self._update_ticks() if self.grid_lines_kwargs['visible']: if self._grid_type == 'lines': self._update_grid_lines() else: self._update_grid_contour() if self._grid_type == 'lines': frame_patch = self.frame.patch for path in self.grid_lines: p = PathPatch(path, **self.grid_lines_kwargs) p.set_clip_path(frame_patch) p.draw(renderer) elif self._grid is not None: for line in self._grid.collections: line.set(**self.grid_lines_kwargs) line.draw(renderer) renderer.close_group('grid lines') def _draw_ticks(self, renderer, bboxes, ticklabels_bbox, ticks_locs): renderer.open_group('ticks') self.ticks.draw(renderer, ticks_locs) self.ticklabels.draw(renderer, bboxes=bboxes, ticklabels_bbox=ticklabels_bbox) renderer.close_group('ticks') def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, ticks_locs, visible_ticks): renderer.open_group('axis labels') self.axislabels.draw(renderer, bboxes=bboxes, ticklabels_bbox=ticklabels_bbox, coord_ticklabels_bbox=ticklabels_bbox[self], ticks_locs=ticks_locs, visible_ticks=visible_ticks) renderer.close_group('axis labels') def _update_ticks(self): # TODO: this method should be optimized for speed # Here we determine the location and rotation of all the ticks. For # each axis, we can check the intersections for the specific # coordinate and once we have the tick positions, we can use the WCS # to determine the rotations. # Find the range of coordinates in all directions coord_range = self.parent_map.get_coord_range() # First find the ticks we want to show tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index]) if self.ticks.get_display_minor_ticks(): minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index]) # We want to allow non-standard rectangular frames, so we just rely on # the parent axes to tell us what the bounding frame is. from . import conf frame = self.frame.sample(conf.frame_boundary_samples) self.ticks.clear() self.ticklabels.clear() self.lblinfo = [] self.lbl_world = [] # Look up parent axes' transform from data to figure coordinates. # # See: # http://matplotlib.org/users/transforms_tutorial.html#the-transformation-pipeline transData = self.parent_axes.transData invertedTransLimits = transData.inverted() for axis, spine in frame.items(): # Determine tick rotation in display coordinates and compare to # the normal angle in display coordinates. 
pixel0 = spine.data world0 = spine.world[:, self.coord_index] world0 = self.transform.transform(pixel0)[:, self.coord_index] axes0 = transData.transform(pixel0) # Advance 2 pixels in figure coordinates pixel1 = axes0.copy() pixel1[:, 0] += 2.0 pixel1 = invertedTransLimits.transform(pixel1) world1 = self.transform.transform(pixel1)[:, self.coord_index] # Advance 2 pixels in figure coordinates pixel2 = axes0.copy() pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0 pixel2 = invertedTransLimits.transform(pixel2) world2 = self.transform.transform(pixel2)[:, self.coord_index] dx = (world1 - world0) dy = (world2 - world0) # Rotate by 90 degrees dx, dy = -dy, dx if self._coord_unit_scale is not None: dx *= self._coord_unit_scale dy *= self._coord_unit_scale if self.coord_type == 'longitude': # Here we wrap at 180 not self.coord_wrap since we want to # always ensure abs(dx) < 180 and abs(dy) < 180 dx = wrap_angle_at(dx, 180.) dy = wrap_angle_at(dy, 180.) tick_angle = np.degrees(np.arctan2(dy, dx)) normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]]) with np.errstate(invalid='ignore'): reset = (((normal_angle_full - tick_angle) % 360 > 90.) & ((tick_angle - normal_angle_full) % 360 > 90.)) tick_angle[reset] -= 180. # We find for each interval the starting and ending coordinate, # ensuring that we take wrapping into account correctly for # longitudes. w1 = spine.world[:-1, self.coord_index] w2 = spine.world[1:, self.coord_index] if self._coord_unit_scale is not None: w1 = w1 * self._coord_unit_scale w2 = w2 * self._coord_unit_scale if self.coord_type == 'longitude': w1 = wrap_angle_at(w1, self.coord_wrap) w2 = wrap_angle_at(w2, self.coord_wrap) with np.errstate(invalid='ignore'): w1[w2 - w1 > 180.] += 360 w2[w1 - w2 > 180.] += 360 # For longitudes, we need to check ticks as well as ticks + 360, # since the above can produce pairs such as 359 to 361 or 0.5 to # 1.5, both of which would match a tick at 0.75. Otherwise we just # check the ticks determined above. self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle) if self.ticks.get_display_minor_ticks(): self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1, w2, tick_angle, ticks='minor') # format tick labels, add to scene text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing) for kwargs, txt in zip(self.lblinfo, text): self.ticklabels.add(text=txt, **kwargs) def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2, tick_angle, ticks='major'): tick_world_coordinates_values = tick_world_coordinates.value if self.coord_type == 'longitude': tick_world_coordinates_values = np.hstack([tick_world_coordinates_values, tick_world_coordinates_values + 360]) for t in tick_world_coordinates_values: # Find steps where a tick is present. We have to check # separately for the case where the tick falls exactly on the # frame points, otherwise we'll get two matches, one for w1 and # one for w2. 
with np.errstate(invalid='ignore'): intersections = np.hstack([np.nonzero((t - w1) == 0)[0], np.nonzero(((t - w1) * (t - w2)) < 0)[0]]) # But we also need to check for intersection with the last w2 if t - w2[-1] == 0: intersections = np.append(intersections, len(w2) - 1) # Loop over ticks, and find exact pixel coordinates by linear # interpolation for imin in intersections: imax = imin + 1 if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13): continue # tick is exactly aligned with frame else: frac = (t - w1[imin]) / (w2[imin] - w1[imin]) x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0]) y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1]) x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0]) y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1]) delta_angle = tick_angle[imax] - tick_angle[imin] if delta_angle > 180.: delta_angle -= 360. elif delta_angle < -180.: delta_angle += 360. angle_i = tick_angle[imin] + frac * delta_angle if self.coord_type == 'longitude': world = wrap_angle_at(t, self.coord_wrap) else: world = t if ticks == 'major': self.ticks.add(axis=axis, pixel=(x_data_i, y_data_i), world=world, angle=angle_i, axis_displacement=imin + frac) # store information to pass to ticklabels.add # it's faster to format many ticklabels at once outside # of the loop self.lblinfo.append(dict(axis=axis, pixel=(x_pix_i, y_pix_i), world=world, angle=spine.normal_angle[imin], axis_displacement=imin + frac)) self.lbl_world.append(world) else: self.ticks.add_minor(minor_axis=axis, minor_pixel=(x_data_i, y_data_i), minor_world=world, minor_angle=angle_i, minor_axis_displacement=imin + frac) def display_minor_ticks(self, display_minor_ticks): """ Display minor ticks for this coordinate. Parameters ---------- display_minor_ticks : bool Whether or not to display minor ticks. """ self.ticks.display_minor_ticks(display_minor_ticks) def get_minor_frequency(self): return self.minor_frequency def set_minor_frequency(self, frequency): """ Set the frequency of minor ticks per major ticks. Parameters ---------- frequency : int The number of minor ticks per major ticks. """ self.minor_frequency = frequency def _update_grid_lines(self): # For 3-d WCS with a correlated third axis, the *proper* way of # drawing a grid should be to find the world coordinates of all pixels # and drawing contours. What we are doing here assumes that we can # define the grid lines with just two of the coordinates (and # therefore assumes that the other coordinates are fixed and set to # the value in the slice). Here we basically assume that if the WCS # had a third axis, it has been abstracted away in the transformation. coord_range = self.parent_map.get_coord_range() tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index]) tick_world_coordinates_values = tick_world_coordinates.value n_coord = len(tick_world_coordinates_values) from . 
import conf n_samples = conf.grid_samples xy_world = np.zeros((n_samples * n_coord, 2)) self.grid_lines = [] for iw, w in enumerate(tick_world_coordinates_values): subset = slice(iw * n_samples, (iw + 1) * n_samples) if self.coord_index == 0: xy_world[subset, 0] = np.repeat(w, n_samples) xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples) else: xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples) xy_world[subset, 1] = np.repeat(w, n_samples) # We now convert all the world coordinates to pixel coordinates in a # single go rather than doing this in the gridline to path conversion # to fully benefit from vectorized coordinate transformations. # Currently xy_world is in deg, but transform function needs it in # native units if self._coord_unit_scale is not None: xy_world /= self._coord_unit_scale # Transform line to pixel coordinates pixel = self.transform.inverted().transform(xy_world) # Create round-tripped values for checking xy_world_round = self.transform.transform(pixel) for iw in range(n_coord): subset = slice(iw * n_samples, (iw + 1) * n_samples) self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset])) def _get_gridline(self, xy_world, pixel, xy_world_round): if self.coord_type == 'scalar': return get_gridline_path(xy_world, pixel) else: return get_lon_lat_path(xy_world, pixel, xy_world_round) def _update_grid_contour(self): if hasattr(self, '_grid'): for line in self._grid.collections: line.remove() xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() x, y, field = self.transform.get_coord_slices(xmin, xmax, ymin, ymax, 200, 200) coord_range = self.parent_map.get_coord_range() tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index]) field = field[self.coord_index] # tick_world_coordinates is a Quantities array and we only needs its values tick_world_coordinates_values = tick_world_coordinates.value if self.coord_type == 'longitude': # Find biggest gap in tick_world_coordinates and wrap in middle # For now just assume spacing is equal, so any mid-point will do mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1]) field = wrap_angle_at(field, mid) tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid) # Replace wraps by NaN reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180) field[:-1, :-1][reset] = np.nan field[1:, :-1][reset] = np.nan field[:-1, 1:][reset] = np.nan field[1:, 1:][reset] = np.nan if len(tick_world_coordinates_values) > 0: self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values)) else: self._grid = None
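# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). CoordinateHelper
# objects are not instantiated directly; they are obtained from ``ax.coords``
# on a WCSAxes instance, which is assumed to exist here with RA as its first
# world axis:
#
#     lon = ax.coords[0]                       # a CoordinateHelper
#     lon.set_major_formatter('hh:mm:ss')      # sexagesimal labels in hours
#     lon.set_ticks(number=4, color='white')   # roughly four white ticks
#     lon.set_ticklabel(size=8)
#     lon.set_axislabel('Right Ascension')
#     lon.grid(color='white', alpha=0.5)
# ---------------------------------------------------------------------------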
fc1dd12e0ca2c81aac79d2091b6470017dfd0dad31c6c533492c1f136bd504d5
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file defines the AngleFormatterLocator class which is a class that # provides both a method for a formatter and one for a locator, for a given # label spacing. The advantage of keeping the two connected is that we need to # make sure that the formatter can correctly represent the spacing requested and # vice versa. For example, a format of dd:mm cannot work with a tick spacing # that is not a multiple of one arcminute. import re import warnings import numpy as np from matplotlib import rcParams from ... import units as u from ...coordinates import Angle DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$') HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$') DDEC_RE = re.compile('^d(.(d)+)?$') DMIN_RE = re.compile('^m(.(m)+)?$') DSEC_RE = re.compile('^s(.(s)+)?$') SCAL_RE = re.compile('^x(.(x)+)?$') class BaseFormatterLocator: """ A joint formatter/locator """ def __init__(self, values=None, number=None, spacing=None, format=None): if (values, number, spacing).count(None) < 2: raise ValueError("At most one of values/number/spacing can be specifed") if values is not None: self.values = values elif number is not None: self.number = number elif spacing is not None: self.spacing = spacing else: self.number = 5 self.format = format @property def values(self): return self._values @values.setter def values(self, values): if not isinstance(values, u.Quantity) or (not values.ndim == 1): raise TypeError("values should be an astropy.units.Quantity array") self._number = None self._spacing = None self._values = values @property def number(self): return self._number @number.setter def number(self, number): self._number = number self._spacing = None self._values = None @property def spacing(self): return self._spacing @spacing.setter def spacing(self, spacing): self._number = None self._spacing = spacing self._values = None def minor_locator(self, spacing, frequency, value_min, value_max): if self.values is not None: return [] * self._unit minor_spacing = spacing.value / frequency values = self._locate_values(value_min, value_max, minor_spacing) index = np.where((values % frequency) == 0) index = index[0][0] values = np.delete(values, np.s_[index::frequency]) return values * minor_spacing * self._unit @staticmethod def _locate_values(value_min, value_max, spacing): imin = np.ceil(value_min / spacing) imax = np.floor(value_max / spacing) values = np.arange(imin, imax + 1, dtype=int) return values class AngleFormatterLocator(BaseFormatterLocator): """ A joint formatter/locator """ def __init__(self, values=None, number=None, spacing=None, format=None): self._unit = u.degree self._sep = None super().__init__(values=values, number=number, spacing=spacing, format=format) @property def spacing(self): return self._spacing @spacing.setter def spacing(self, spacing): if spacing is not None and (not isinstance(spacing, u.Quantity) or spacing.unit.physical_type != 'angle'): raise TypeError("spacing should be an astropy.units.Quantity " "instance with units of angle") self._number = None self._spacing = spacing self._values = None @property def sep(self): return self._sep @sep.setter def sep(self, separator): self._sep = separator @property def format(self): return self._format @format.setter def format(self, value): self._format = value if value is None: return if DMS_RE.match(value) is not None: self._decimal = False self._unit = u.degree if '.' 
in value: self._precision = len(value) - value.index('.') - 1 self._fields = 3 else: self._precision = 0 self._fields = value.count(':') + 1 elif HMS_RE.match(value) is not None: self._decimal = False self._unit = u.hourangle if '.' in value: self._precision = len(value) - value.index('.') - 1 self._fields = 3 else: self._precision = 0 self._fields = value.count(':') + 1 elif DDEC_RE.match(value) is not None: self._decimal = True self._unit = u.degree self._fields = 1 if '.' in value: self._precision = len(value) - value.index('.') - 1 else: self._precision = 0 elif DMIN_RE.match(value) is not None: self._decimal = True self._unit = u.arcmin self._fields = 1 if '.' in value: self._precision = len(value) - value.index('.') - 1 else: self._precision = 0 elif DSEC_RE.match(value) is not None: self._decimal = True self._unit = u.arcsec self._fields = 1 if '.' in value: self._precision = len(value) - value.index('.') - 1 else: self._precision = 0 else: raise ValueError("Invalid format: {0}".format(value)) if self.spacing is not None and self.spacing < self.base_spacing: warnings.warn("Spacing is too small - resetting spacing to match format") self.spacing = self.base_spacing if self.spacing is not None: ratio = (self.spacing / self.base_spacing).decompose().value remainder = ratio - np.round(ratio) if abs(remainder) > 1.e-10: warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format") self.spacing = self.base_spacing * max(1, round(ratio)) @property def base_spacing(self): if self._decimal: spacing = self._unit / (10. ** self._precision) else: if self._fields == 1: spacing = 1. * u.degree elif self._fields == 2: spacing = 1. * u.arcmin elif self._fields == 3: if self._precision == 0: spacing = 1. * u.arcsec else: spacing = u.arcsec / (10. ** self._precision) if self._unit is u.hourangle: spacing *= 15 return spacing def locator(self, value_min, value_max): if self.values is not None: # values were manually specified return self.values, 1.1 * u.arcsec else: # In the special case where value_min is the same as value_max, we # don't locate any ticks. This can occur for example when taking a # slice for a cube (along the dimension sliced). if value_min == value_max: return [] * u.deg, 0 * u.arcsec if self.spacing is not None: # spacing was manually specified spacing_deg = self.spacing.to_value(u.degree) elif self.number is not None: # number of ticks was specified, work out optimal spacing # first compute the exact spacing dv = abs(float(value_max - value_min)) / self.number * u.degree if self.format is not None and dv < self.base_spacing: # if the spacing is less than the minimum spacing allowed by the format, simply # use the format precision instead. spacing_deg = self.base_spacing.to_value(u.degree) else: # otherwise we clip to the nearest 'sensible' spacing if self._unit is u.degree: from .utils import select_step_degree spacing_deg = select_step_degree(dv).to_value(u.degree) else: from .utils import select_step_hour spacing_deg = select_step_hour(dv).to_value(u.degree) # We now find the interval values as multiples of the spacing and # generate the tick positions from this. 
values = self._locate_values(value_min, value_max, spacing_deg) return values * spacing_deg * u.degree, spacing_deg * u.degree def formatter(self, values, spacing): if not isinstance(values, u.Quantity) and values is not None: raise TypeError("values should be a Quantities array") if len(values) > 0: if self.format is None: spacing = spacing.to_value(u.arcsec) if spacing > 3600: fields = 1 precision = 0 elif spacing > 60: fields = 2 precision = 0 elif spacing > 1: fields = 3 precision = 0 else: fields = 3 precision = -int(np.floor(np.log10(spacing))) decimal = False unit = u.degree else: fields = self._fields precision = self._precision decimal = self._decimal unit = self._unit if decimal: sep = None elif self._sep is not None: sep = self._sep else: if unit == u.degree: if rcParams['text.usetex']: deg = r'$^\circ$' else: deg = '\xb0' sep = (deg, "'", '"') else: sep = ('h', 'm', 's') angles = Angle(values) string = angles.to_string(unit=unit, precision=precision, decimal=decimal, fields=fields, sep=sep).tolist() return string else: return [] class ScalarFormatterLocator(BaseFormatterLocator): """ A joint formatter/locator """ def __init__(self, values=None, number=None, spacing=None, format=None, unit=None): if unit is not None: self._unit = unit self._format_unit = unit elif spacing is not None: self._unit = spacing.unit self._format_unit = spacing.unit elif values is not None: self._unit = values.unit self._format_unit = values.unit super().__init__(values=values, number=number, spacing=spacing, format=format) @property def format_unit(self): return self._format_unit @format_unit.setter def format_unit(self, unit): if not issubclass(unit.__class__, u.UnitBase): raise TypeError("unit should be an astropy UnitBase subclass") self._format_unit = unit @property def spacing(self): return self._spacing @spacing.setter def spacing(self, spacing): if spacing is not None and not isinstance(spacing, u.Quantity): raise TypeError("spacing should be an astropy.units.Quantity instance") self._number = None self._spacing = spacing self._values = None @property def format(self): return self._format @format.setter def format(self, value): self._format = value if value is None: return if SCAL_RE.match(value) is not None: if '.' in value: self._precision = len(value) - value.index('.') - 1 else: self._precision = 0 if self.spacing is not None and self.spacing < self.base_spacing: warnings.warn("Spacing is too small - resetting spacing to match format") self.spacing = self.base_spacing if self.spacing is not None: ratio = (self.spacing / self.base_spacing).decompose().value remainder = ratio - np.round(ratio) if abs(remainder) > 1.e-10: warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format") self.spacing = self.base_spacing * max(1, round(ratio)) elif not value.startswith('%'): raise ValueError("Invalid format: {0}".format(value)) @property def base_spacing(self): return self._unit / (10. ** self._precision) def locator(self, value_min, value_max): if self.values is not None: # values were manually specified return self.values, 1.1 * self._unit else: # In the special case where value_min is the same as value_max, we # don't locate any ticks. This can occur for example when taking a # slice for a cube (along the dimension sliced). 
if value_min == value_max: return [] * self._unit, 0 * self._unit if self.spacing is not None: # spacing was manually specified spacing = self.spacing.to_value(self._unit) elif self.number is not None: # number of ticks was specified, work out optimal spacing # first compute the exact spacing dv = abs(float(value_max - value_min)) / self.number if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing.value: # if the spacing is less than the minimum spacing allowed by the format, simply # use the format precision instead. spacing = self.base_spacing.to_value(self._unit) else: from .utils import select_step_scalar spacing = select_step_scalar(dv) # We now find the interval values as multiples of the spacing and # generate the tick positions from this values = self._locate_values(value_min, value_max, spacing) return values * spacing * self._unit, spacing * self._unit def formatter(self, values, spacing): if len(values) > 0: if self.format is None: if spacing.value < 1.: precision = -int(np.floor(np.log10(spacing.value))) else: precision = 0 elif self.format.startswith('%'): return [(self.format % x.value) for x in values] else: precision = self._precision return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values] else: return []
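# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): the formatter/locator
# pair defined above can also be exercised on its own; the values below are
# arbitrary.
#
#     fl = AngleFormatterLocator(number=4, format='dd:mm')
#     ticks, spacing = fl.locator(-10., 10.)   # Quantity of tick values (deg)
#                                              # and the spacing between them
#     labels = fl.formatter(ticks, spacing)    # list of formatted tick labels
# ---------------------------------------------------------------------------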
7ae67be1d46985642431807e682fd9c06745ebc3138ec7b0eb100c0f2c870774
# Licensed under a 3-clause BSD style license - see LICENSE.rst from functools import partial from collections import defaultdict import numpy as np from matplotlib.artist import Artist from matplotlib.axes import Axes, subplot_class_factory from matplotlib.transforms import Affine2D, Bbox, Transform from ...coordinates import SkyCoord, BaseCoordinateFrame from ...wcs import WCS from ...wcs.utils import wcs_to_celestial_frame from .transforms import (WCSPixel2WorldTransform, WCSWorld2PixelTransform, CoordinateTransform) from .coordinates_map import CoordinatesMap from .utils import get_coord_meta from .frame import EllipticalFrame, RectangularFrame __all__ = ['WCSAxes', 'WCSAxesSubplot'] VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle'] IDENTITY = WCS(naxis=2) IDENTITY.wcs.ctype = ["X", "Y"] IDENTITY.wcs.crval = [0., 0.] IDENTITY.wcs.crpix = [1., 1.] IDENTITY.wcs.cdelt = [1., 1.] class _WCSAxesArtist(Artist): """This is a dummy artist to enforce the correct z-order of axis ticks, tick labels, and gridlines. FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder and then renders them in sequence. For normal Matplotlib axes, the ticks, tick labels, and gridlines are included in this list of artists and hence are automatically drawn in the correct order. However, ``WCSAxes`` disables the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders ersatz ticks, labels, and gridlines by explicitly calling the functions ``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc. This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels, and gridlines in the standary way.""" def draw(self, renderer, *args, **kwargs): self.axes.draw_wcsaxes(renderer) class WCSAxes(Axes): """ The main axes class that can be used to show world coordinates from a WCS. Parameters ---------- fig : `~matplotlib.figure.Figure` The figure to add the axes to rect : list The position of the axes in the figure in relative units. Should be given as ``[left, bottom, width, height]``. wcs : :class:`~astropy.wcs.WCS`, optional The WCS for the data. If this is specified, ``transform`` cannot be specified. transform : `~matplotlib.transforms.Transform`, optional The transform for the data. If this is specified, ``wcs`` cannot be specified. coord_meta : dict, optional A dictionary providing additional metadata when ``transform`` is specified. This should include the keys ``type``, ``wrap``, and ``unit``. Each of these should be a list with as many items as the dimension of the WCS. The ``type`` entries should be one of ``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should give, for the longitude, the angle at which the coordinate wraps (and `None` otherwise), and the ``unit`` should give the unit of the coordinates as :class:`~astropy.units.Unit` instances. transData : `~matplotlib.transforms.Transform`, optional Can be used to override the default data -> pixel mapping. slices : tuple, optional For WCS transformations with more than two dimensions, we need to choose which dimensions are being shown in the 2D image. The slice should contain one ``x`` entry, one ``y`` entry, and the rest of the values should be integers indicating the slice through the data. The order of the items in the slice should be the same as the order of the dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the order of the dimensions in Numpy. 
For example, ``(50, 'x', 'y')`` means that the first WCS dimension (last Numpy dimension) will be sliced at an index of 50, the second WCS and Numpy dimension will be shown on the x axis, and the final WCS dimension (first Numpy dimension) will be shown on the y-axis (and therefore the data will be plotted using ``data[:, :, 50].transpose()``) frame_class : type, optional The class for the frame, which should be a subclass of :class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a :class:`~astropy.visualization.wcsaxes.frame.RectangularFrame` """ def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None, transData=None, slices=None, frame_class=RectangularFrame, **kwargs): super().__init__(fig, rect, **kwargs) self._bboxes = [] self.frame_class = frame_class if not (transData is None): # User wants to override the transform for the final # data->pixel mapping self.transData = transData self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta) self._hide_parent_artists() self.format_coord = self._display_world_coords self._display_coords_index = 0 fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs) self.patch = self.coords.frame.patch self._wcsaxesartist = _WCSAxesArtist() self.add_artist(self._wcsaxesartist) self._drawn = False def _display_world_coords(self, x, y): if not self._drawn: return "" if self._display_coords_index == -1: return "%s %s (pixel)" % (x, y) pixel = np.array([x, y]) coords = self._all_coords[self._display_coords_index] world = coords._transform.transform(np.array([pixel]))[0] xw = coords[self._x_index].format_coord(world[self._x_index]) yw = coords[self._y_index].format_coord(world[self._y_index]) if self._display_coords_index == 0: system = "world" else: system = "world, overlay {0}".format(self._display_coords_index) coord_string = "%s %s (%s)" % (xw, yw, system) return coord_string def _set_cursor_prefs(self, event, **kwargs): if event.key == 'w': self._display_coords_index += 1 if self._display_coords_index + 1 > len(self._all_coords): self._display_coords_index = -1 def _hide_parent_artists(self): # Turn off spines and current axes for s in self.spines.values(): s.set_visible(False) self.xaxis.set_visible(False) self.yaxis.set_visible(False) # We now overload ``imshow`` because we need to make sure that origin is # set to ``lower`` for all images, which means that we need to flip RGB # images. def imshow(self, X, *args, **kwargs): """ Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`. If an RGB image is passed as a PIL object, it will be flipped vertically and ``origin`` will be set to ``lower``, since WCS transformations - like FITS files - assume that the origin is the lower left pixel of the image (whereas RGB images have the origin in the top left). All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`. """ origin = kwargs.get('origin', None) if origin == 'upper': raise ValueError("Cannot use images with origin='upper' in WCSAxes.") # To check whether the image is a PIL image we can check if the data # has a 'getpixel' attribute - this is what Matplotlib's AxesImage does try: from PIL.Image import Image, FLIP_TOP_BOTTOM except ImportError: # We don't need to worry since PIL is not installed, so user cannot # have passed RGB image. 
pass else: if isinstance(X, Image) or hasattr(X, 'getpixel'): X = X.transpose(FLIP_TOP_BOTTOM) kwargs['origin'] = 'lower' return super().imshow(X, *args, **kwargs) def plot_coord(self, *args, **kwargs): """ Plot `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` objects onto the axes. The first argument to :meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a coordinate, which will then be converted to the first two parameters to `matplotlib.axes.Axes.plot`. All other arguments are the same as `matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword argument will be created based on the coordinate. Parameters ---------- coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` The coordinate object to plot on the axes. This is converted to the first two arguments to `matplotlib.axes.Axes.plot`. See Also -------- matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it. """ if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)): # Extract the frame from the first argument. frame0 = args[0] if isinstance(frame0, SkyCoord): frame0 = frame0.frame plot_data = [] for coord in self.coords: if coord.coord_type == 'longitude': plot_data.append(frame0.data.lon.to_value(coord.coord_unit)) elif coord.coord_type == 'latitude': plot_data.append(frame0.data.lat.to_value(coord.coord_unit)) else: raise NotImplementedError("Coordinates cannot be plotted with this " "method because the WCS does not represent longitude/latitude.") if 'transform' in kwargs.keys(): raise TypeError("The 'transform' keyword argument is not allowed," " as it is automatically determined by the input coordinate frame.") transform = self.get_transform(frame0) kwargs.update({'transform': transform}) args = tuple(plot_data) + args[1:] super().plot(*args, **kwargs) def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None): """ Reset the current Axes, to use a new WCS object. """ # Here determine all the coordinate axes that should be shown. if wcs is None and transform is None: self.wcs = IDENTITY else: # We now force call 'set', which ensures the WCS object is # consistent, which will only be important if the WCS has been set # by hand. For example if the user sets a celestial WCS by hand and # forgets to set the units, WCS.wcs.set() will do this. if wcs is not None: wcs.wcs.set() self.wcs = wcs # If we are making a new WCS, we need to preserve the path object since # it may already be used by objects that have been plotted, and we need # to continue updating it. CoordinatesMap will create a new frame # instance, but we can tell that instance to keep using the old path. 
if hasattr(self, 'coords'): previous_frame = {'path': self.coords.frame._path, 'color': self.coords.frame.get_color(), 'linewidth': self.coords.frame.get_linewidth()} else: previous_frame = {'path': None} self.coords = CoordinatesMap(self, wcs=self.wcs, slice=slices, transform=transform, coord_meta=coord_meta, frame_class=self.frame_class, previous_frame_path=previous_frame['path']) if previous_frame['path'] is not None: self.coords.frame.set_color(previous_frame['color']) self.coords.frame.set_linewidth(previous_frame['linewidth']) self._all_coords = [self.coords] if slices is None: self.slices = ('x', 'y') self._x_index = 0 self._y_index = 1 else: self.slices = slices self._x_index = self.slices.index('x') self._y_index = self.slices.index('y') # Common default settings for Rectangular Frame if self.frame_class is RectangularFrame: for coord_index in range(len(self.slices)): if self.slices[coord_index] == 'x': self.coords[coord_index].set_axislabel_position('b') self.coords[coord_index].set_ticklabel_position('b') elif self.slices[coord_index] == 'y': self.coords[coord_index].set_axislabel_position('l') self.coords[coord_index].set_ticklabel_position('l') else: self.coords[coord_index].set_axislabel_position('') self.coords[coord_index].set_ticklabel_position('') self.coords[coord_index].set_ticks_position('') # Common default settings for Elliptical Frame elif self.frame_class is EllipticalFrame: for coord_index in range(len(self.slices)): if self.slices[coord_index] == 'x': self.coords[coord_index].set_axislabel_position('h') self.coords[coord_index].set_ticklabel_position('h') self.coords[coord_index].set_ticks_position('h') elif self.slices[coord_index] == 'y': self.coords[coord_index].set_ticks_position('c') self.coords[coord_index].set_axislabel_position('c') self.coords[coord_index].set_ticklabel_position('c') else: self.coords[coord_index].set_axislabel_position('') self.coords[coord_index].set_ticklabel_position('') self.coords[coord_index].set_ticks_position('') def draw_wcsaxes(self, renderer): # Here need to find out range of all coordinates, and update range for # each coordinate axis. For now, just assume it covers the whole sky. self._bboxes = [] # This generates a structure like [coords][axis] = [...] ticklabels_bbox = defaultdict(partial(defaultdict, list)) ticks_locs = defaultdict(partial(defaultdict, list)) visible_ticks = [] for coords in self._all_coords: coords.frame.update() for coord in coords: coord._draw_grid(renderer) for coords in self._all_coords: for coord in coords: coord._draw_ticks(renderer, bboxes=self._bboxes, ticklabels_bbox=ticklabels_bbox[coord], ticks_locs=ticks_locs[coord]) visible_ticks.extend(coord.ticklabels.get_visible_axes()) for coords in self._all_coords: for coord in coords: coord._draw_axislabels(renderer, bboxes=self._bboxes, ticklabels_bbox=ticklabels_bbox, ticks_locs=ticks_locs[coord], visible_ticks=visible_ticks) self.coords.frame.draw(renderer) def draw(self, renderer, inframe=False): # In Axes.draw, the following code can result in the xlim and ylim # values changing, so we need to force call this here to make sure that # the limits are correct before we update the patch. 
locator = self.get_axes_locator() if locator: pos = locator(self, renderer) self.apply_aspect(pos) else: self.apply_aspect() if self._axisbelow is True: self._wcsaxesartist.set_zorder(0.5) elif self._axisbelow is False: self._wcsaxesartist.set_zorder(2.5) else: # 'line': above patches, below lines self._wcsaxesartist.set_zorder(1.5) # We need to make sure that that frame path is up to date self.coords.frame._update_patch_path() super().draw(renderer, inframe=inframe) self._drawn = True def set_xlabel(self, label, labelpad=1, **kwargs): self.coords[self._x_index].set_axislabel(label, minpad=labelpad, **kwargs) def set_ylabel(self, label, labelpad=1, **kwargs): self.coords[self._y_index].set_axislabel(label, minpad=labelpad, **kwargs) def get_xlabel(self): return self.coords[self._x_index].get_axislabel() def get_ylabel(self): return self.coords[self._y_index].get_axislabel() def get_coords_overlay(self, frame, coord_meta=None): # Here we can't use get_transform because that deals with # pixel-to-pixel transformations when passing a WCS object. if isinstance(frame, WCS): coords = CoordinatesMap(self, frame, frame_class=self.frame_class) else: if coord_meta is None: coord_meta = get_coord_meta(frame) transform = self._get_transform_no_transdata(frame) coords = CoordinatesMap(self, transform=transform, coord_meta=coord_meta, frame_class=self.frame_class) self._all_coords.append(coords) # Common settings for overlay coords[0].set_axislabel_position('t') coords[1].set_axislabel_position('r') coords[0].set_ticklabel_position('t') coords[1].set_ticklabel_position('r') self.overlay_coords = coords return coords def get_transform(self, frame): """ Return a transform from the specified frame to display coordinates. This does not include the transData transformation Parameters ---------- frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str The ``frame`` parameter can have several possible types: * :class:`~astropy.wcs.WCS` instance: assumed to be a transformation from pixel to world coordinates, where the world coordinates are the same as those in the WCS transformation used for this ``WCSAxes`` instance. This is used for example to show contours, since this involves plotting an array in pixel coordinates that are not the final data coordinate and have to be transformed to the common world coordinate system first. * :class:`~matplotlib.transforms.Transform` instance: it is assumed to be a transform to the world coordinates that are part of the WCS used to instantiate this ``WCSAxes`` instance. * ``'pixel'`` or ``'world'``: return a transformation that allows users to plot in pixel/data coordinates (essentially an identity transform) and ``world`` (the default world-to-pixel transformation used to instantiate the ``WCSAxes`` instance). * ``'fk5'`` or ``'galactic'``: return a transformation from the specified frame to the pixel/data coordinates. * :class:`~astropy.coordinates.BaseCoordinateFrame` instance. 
""" return self._get_transform_no_transdata(frame).inverted() + self.transData def _get_transform_no_transdata(self, frame): """ Return a transform from data to the specified frame """ if self.wcs is None and frame != 'pixel': raise ValueError('No WCS specified, so only pixel coordinates are available') if isinstance(frame, WCS): coord_in = wcs_to_celestial_frame(self.wcs) coord_out = wcs_to_celestial_frame(frame) if coord_in == coord_out: return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) + WCSWorld2PixelTransform(frame)) else: return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) + CoordinateTransform(self.wcs, frame) + WCSWorld2PixelTransform(frame)) elif frame == 'pixel': return Affine2D() elif isinstance(frame, Transform): pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices) return pixel2world + frame else: pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices) if frame == 'world': return pixel2world else: coordinate_transform = CoordinateTransform(self.wcs, frame) if coordinate_transform.same_frames: return pixel2world else: return pixel2world + CoordinateTransform(self.wcs, frame) def get_tightbbox(self, renderer): if not self.get_visible(): return bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)] if bb: _bbox = Bbox.union(bb) return _bbox else: return self.get_window_extent(renderer) def grid(self, b=None, axis='both', *, which='major', **kwargs): """ Plot gridlines for both coordinates. Standard matplotlib appearance options (color, alpha, etc.) can be passed as keyword arguments. This behaves like `matplotlib.axes.Axes` except that if no arguments are specified, the grid is shown rather than toggled. Parameters ---------- b : bool Whether to show the gridlines. """ if not hasattr(self, 'coords'): return if which != 'major': raise NotImplementedError('Plotting the grid for the minor ticks is ' 'not supported.') if axis == 'both': self.coords.grid(draw_grid=b, **kwargs) elif axis == 'x': self.coords[0].grid(draw_grid=b, **kwargs) elif axis == 'y': self.coords[1].grid(draw_grid=b, **kwargs) else: raise ValueError('axis should be one of x/y/both') # In the following, we put the generated subplot class in a temporary class and # we then inherit it - if we don't do this, the generated class appears to # belong in matplotlib, not in WCSAxes, from the API's point of view. class WCSAxesSubplot(subplot_class_factory(WCSAxes)): """ A subclass class for WCSAxes """ pass
239713085a6f8ad4773472182b2dc302ec0f361ba82921660b71502294118263
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# The following few lines skip this module when running tests if matplotlib is
# not available (and will have no impact otherwise)
try:
    import pytest
    pytest.importorskip("matplotlib")
    del pytest
except ImportError:
    pass

from .core import *
from .coordinate_helpers import CoordinateHelper
from .coordinates_map import CoordinatesMap
from .patches import *
from ... import config as _config


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.visualization.wcsaxes`.
    """
    coordinate_range_samples = _config.ConfigItem(50,
        'The number of samples along each image axis when determining '
        'the range of coordinates in a plot.')

    frame_boundary_samples = _config.ConfigItem(1000,
        'How many points to sample along the axes when determining '
        'tick locations.')

    grid_samples = _config.ConfigItem(1000,
        'How many points to sample along grid lines.')


conf = Conf()
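# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): these configuration
# items can be adjusted at runtime from user code, e.g.
#
#     from astropy.visualization.wcsaxes import conf
#     conf.frame_boundary_samples = 2000   # denser sampling of the frame
#     conf.grid_samples = 500              # coarser sampling of grid lines
# ---------------------------------------------------------------------------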
77853076d816908fa241ef0409533fce3ea6fd5d87f741b0df8fc87baccdefbe
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from matplotlib.text import Text from .frame import RectangularFrame def sort_using(X, Y): return [x for (y, x) in sorted(zip(Y, X))] class TickLabels(Text): def __init__(self, frame, *args, **kwargs): self.clear() self._frame = frame super().__init__(*args, **kwargs) self.set_clip_on(True) self.set_visible_axes('all') self.pad = 0.3 self._exclude_overlapping = False def clear(self): self.world = {} self.pixel = {} self.angle = {} self.text = {} self.disp = {} def add(self, axis, world, pixel, angle, text, axis_displacement): if axis not in self.world: self.world[axis] = [world] self.pixel[axis] = [pixel] self.angle[axis] = [angle] self.text[axis] = [text] self.disp[axis] = [axis_displacement] else: self.world[axis].append(world) self.pixel[axis].append(pixel) self.angle[axis].append(angle) self.text[axis].append(text) self.disp[axis].append(axis_displacement) def sort(self): """ Sort by axis displacement, which allows us to figure out which parts of labels to not repeat. """ for axis in self.world: self.world[axis] = sort_using(self.world[axis], self.disp[axis]) self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis]) self.angle[axis] = sort_using(self.angle[axis], self.disp[axis]) self.text[axis] = sort_using(self.text[axis], self.disp[axis]) self.disp[axis] = sort_using(self.disp[axis], self.disp[axis]) def simplify_labels(self): """ Figure out which parts of labels can be dropped to avoid repetition. """ self.sort() for axis in self.world: t1 = self.text[axis][0] for i in range(1, len(self.world[axis])): t2 = self.text[axis][i] if len(t1) != len(t2): t1 = self.text[axis][i] continue start = 0 # In the following loop, we need to ignore the last character, # hence the len(t1) - 1. This is because if we have two strings # like 13d14m15s we want to make sure that we keep the last # part (15s) even if the two labels are identical. for j in range(len(t1) - 1): if t1[j] != t2[j]: break if t1[j] not in '-0123456789.': start = j + 1 t1 = self.text[axis][i] if start != 0: self.text[axis][i] = self.text[axis][i][start:] def set_visible_axes(self, visible_axes): self._visible_axes = visible_axes def get_visible_axes(self): if self._visible_axes == 'all': return self.world.keys() else: return [x for x in self._visible_axes if x in self.world] def set_exclude_overlapping(self, exclude_overlapping): self._exclude_overlapping = exclude_overlapping def draw(self, renderer, bboxes, ticklabels_bbox): if not self.get_visible(): return self.simplify_labels() text_size = renderer.points_to_pixels(self.get_size()) for axis in self.get_visible_axes(): for i in range(len(self.world[axis])): # In the event that the label is empty (which is not expected # but could happen in unforeseen corner cases), we should just # skip to the next label. if self.text[axis][i] == '': continue self.set_text(self.text[axis][i]) x, y = self.pixel[axis][i] if isinstance(self._frame, RectangularFrame): # This is just to preserve the current results, but can be # removed next time the reference images are re-generated. if np.abs(self.angle[axis][i]) < 45.: ha = 'right' va = 'bottom' dx = - text_size * 0.5 dy = - text_size * 0.5 elif np.abs(self.angle[axis][i] - 90.) < 45: ha = 'center' va = 'bottom' dx = 0 dy = - text_size * 1.5 elif np.abs(self.angle[axis][i] - 180.) 
< 45: ha = 'left' va = 'bottom' dx = text_size * 0.5 dy = - text_size * 0.5 else: ha = 'center' va = 'bottom' dx = 0 dy = text_size * 0.2 self.set_position((x + dx, y + dy)) self.set_ha(ha) self.set_va(va) else: # This is the more general code for arbitrarily oriented # axes # Set initial position and find bounding box self.set_position((x, y)) bb = super().get_window_extent(renderer) # Find width and height, as well as angle at which we # transition which side of the label we use to anchor the # label. width = bb.width height = bb.height # Project axis angle onto bounding box ax = np.cos(np.radians(self.angle[axis][i])) ay = np.sin(np.radians(self.angle[axis][i])) # Set anchor point for label if np.abs(self.angle[axis][i]) < 45.: dx = width dy = ay * height elif np.abs(self.angle[axis][i] - 90.) < 45: dx = ax * width dy = height elif np.abs(self.angle[axis][i] - 180.) < 45: dx = -width dy = ay * height else: dx = ax * width dy = -height dx *= 0.5 dy *= 0.5 # Find normalized vector along axis normal, so as to be # able to nudge the label away by a constant padding factor dist = np.hypot(dx, dy) ddx = dx / dist ddy = dy / dist dx += ddx * text_size * self.pad dy += ddy * text_size * self.pad self.set_position((x - dx, y - dy)) self.set_ha('center') self.set_va('center') bb = super().get_window_extent(renderer) # TODO: the problem here is that we might get rid of a label # that has a key starting bit such as -0:30 where the -0 # might be dropped from all other labels. if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0: super().draw(renderer) bboxes.append(bb) ticklabels_bbox[axis].append(bb)
3ded2b2d163a07f839ea817fa85e247001ad65b7c4ccfb4aa1739da24aa65858
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from matplotlib.lines import Path, Line2D from matplotlib.transforms import Affine2D from matplotlib import rcParams class Ticks(Line2D): """ Ticks are derived from Line2D, and note that ticks themselves are markers. Thus, you should use set_mec, set_mew, etc. To change the tick size (length), you need to use set_ticksize. To change the direction of the ticks (ticks are in opposite direction of ticklabels by default), use set_tick_out(False). Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams` contains default settings (color, size, width) of the form `xtick.*` and `ytick.*`. In a WCS projection, there may not be a clear relationship between axes of the projection and 'x' or 'y' axes. For this reason, we read defaults from `xtick.*`. The following settings affect the default appearance of ticks: * `xtick.direction` * `xtick.major.size` * `xtick.major.width` * `xtick.color` """ def __init__(self, ticksize=None, tick_out=None, **kwargs): if ticksize is None: ticksize = rcParams['xtick.major.size'] self.set_ticksize(ticksize) self.set_tick_out(rcParams.get('xtick.direction', 'in') == 'out') self.clear() line2d_kwargs = {'color': rcParams['xtick.color'], # For the linewidth we need to set a default since old versions of # matplotlib don't have this. 'linewidth': rcParams.get('xtick.major.width', 1)} line2d_kwargs.update(kwargs) Line2D.__init__(self, [0.], [0.], **line2d_kwargs) self.set_visible_axes('all') self._display_minor_ticks = False def display_minor_ticks(self, display_minor_ticks): self._display_minor_ticks = display_minor_ticks def get_display_minor_ticks(self): return self._display_minor_ticks def set_tick_out(self, tick_out): """ set True if tick need to be rotated by 180 degree. """ self._tick_out = tick_out def get_tick_out(self): """ Return True if the tick will be rotated by 180 degree. """ return self._tick_out def set_ticksize(self, ticksize): """ set length of the ticks in points. """ self._ticksize = ticksize def get_ticksize(self): """ Return length of the ticks in points. 
""" return self._ticksize def set_visible_axes(self, visible_axes): self._visible_axes = visible_axes def get_visible_axes(self): if self._visible_axes == 'all': return self.world.keys() else: return [x for x in self._visible_axes if x in self.world] def clear(self): self.world = {} self.pixel = {} self.angle = {} self.disp = {} self.minor_world = {} self.minor_pixel = {} self.minor_angle = {} self.minor_disp = {} def add(self, axis, world, pixel, angle, axis_displacement): if axis not in self.world: self.world[axis] = [world] self.pixel[axis] = [pixel] self.angle[axis] = [angle] self.disp[axis] = [axis_displacement] else: self.world[axis].append(world) self.pixel[axis].append(pixel) self.angle[axis].append(angle) self.disp[axis].append(axis_displacement) def get_minor_world(self): return self.minor_world def add_minor(self, minor_axis, minor_world, minor_pixel, minor_angle, minor_axis_displacement): if minor_axis not in self.minor_world: self.minor_world[minor_axis] = [minor_world] self.minor_pixel[minor_axis] = [minor_pixel] self.minor_angle[minor_axis] = [minor_angle] self.minor_disp[minor_axis] = [minor_axis_displacement] else: self.minor_world[minor_axis].append(minor_world) self.minor_pixel[minor_axis].append(minor_pixel) self.minor_angle[minor_axis].append(minor_angle) self.minor_disp[minor_axis].append(minor_axis_displacement) def __len__(self): return len(self.world) _tickvert_path = Path([[0., 0.], [1., 0.]]) def draw(self, renderer, ticks_locs): """ Draw the ticks. """ if not self.get_visible(): return offset = renderer.points_to_pixels(self.get_ticksize()) self._draw_ticks(renderer, self.pixel, self.angle, offset, ticks_locs) if self._display_minor_ticks: offset = offset * 0.5 # for minor ticksize self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset, ticks_locs) def _draw_ticks(self, renderer, pixel_array, angle_array, offset, ticks_locs): """ Draw the minor ticks. """ path_trans = self.get_transform() gc = renderer.new_gc() gc.set_foreground(self.get_color()) gc.set_alpha(self.get_alpha()) gc.set_linewidth(self.get_linewidth()) marker_scale = Affine2D().scale(offset, offset) marker_rotation = Affine2D() marker_transform = marker_scale + marker_rotation initial_angle = 180. if self.get_tick_out() else 0. for axis in self.get_visible_axes(): if axis not in pixel_array: continue for loc, angle in zip(pixel_array[axis], angle_array[axis]): # Set the rotation for this tick marker_rotation.rotate_deg(initial_angle + angle) # Draw the markers locs = path_trans.transform_non_affine(np.array([loc, loc])) renderer.draw_markers(gc, self._tickvert_path, marker_transform, Path(locs), path_trans.get_affine()) # Reset the tick rotation before moving to the next tick marker_rotation.clear() ticks_locs[axis].append(locs) gc.restore()
33c8a83bbd13bea9a05e0ce701a7bbd7168d0736a9a3143e4413d33170122ddb
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from matplotlib.patches import Polygon from ... import units as u from ...coordinates.representation import UnitSphericalRepresentation from ...coordinates.matrix_utilities import rotation_matrix, matrix_product __all__ = ['SphericalCircle'] def _rotate_polygon(lon, lat, lon0, lat0): """ Given a polygon with vertices defined by (lon, lat), rotate the polygon such that the North pole of the spherical coordinates is now at (lon0, lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the polygon should initially be drawn around the North pole. """ # Create a representation object polygon = UnitSphericalRepresentation(lon=lon, lat=lat) # Determine rotation matrix to make it so that the circle is centered # on the correct longitude/latitude. m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y') m2 = rotation_matrix(-lon0, axis='z') transform_matrix = matrix_product(m2, m1) # Apply 3D rotation polygon = polygon.to_cartesian() polygon = polygon.transform(transform_matrix) polygon = UnitSphericalRepresentation.from_cartesian(polygon) return polygon.lon, polygon.lat class SphericalCircle(Polygon): """ Create a patch representing a spherical circle - that is, a circle that is formed of all the points that are within a certain angle of the central coordinates on a sphere. Here we assume that latitude goes from -90 to +90 This class is needed in cases where the user wants to add a circular patch to a celestial image, since otherwise the circle will be distorted, because a fixed interval in longitude corresponds to a different angle on the sky depending on the latitude. Parameters ---------- center : tuple or `~astropy.units.Quantity` This can be either a tuple of two `~astropy.units.Quantity` objects, or a single `~astropy.units.Quantity` array with two elements. radius : `~astropy.units.Quantity` The radius of the circle resolution : int, optional The number of points that make up the circle - increase this to get a smoother circle. vertex_unit : `~astropy.units.Unit` The units in which the resulting polygon should be defined - this should match the unit that the transformation (e.g. the WCS transformation) expects as input. Notes ----- Additional keyword arguments are passed to `~matplotlib.patches.Polygon` """ def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs): # Extract longitude/latitude, either from a tuple of two quantities, or # a single 2-element Quantity. longitude, latitude = center # Start off by generating the circle around the North pole lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian lon, lat = _rotate_polygon(lon, lat, longitude, latitude) # Extract new longitude/latitude in the requested units lon = lon.to_value(vertex_unit) lat = lat.to_value(vertex_unit) # Create polygon vertices vertices = np.array([lon, lat]).transpose() super().__init__(vertices, **kwargs)
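# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): overlay a spherical
# circle on an existing WCSAxes instance ``ax`` (assumed to have been created
# elsewhere); the centre and radius below are arbitrary.
#
#     from astropy import units as u
#
#     circle = SphericalCircle((10. * u.deg, 20. * u.deg), 2. * u.deg,
#                              resolution=200, edgecolor='yellow',
#                              facecolor='none',
#                              transform=ax.get_transform('fk5'))
#     ax.add_patch(circle)
# ---------------------------------------------------------------------------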
48128c482209dfd338c89c75c7039ac4e5f8ab262e298bfe33153d1a72e342e0
# Licensed under a 3-clause BSD style license - see LICENSE.rst from .coordinate_helpers import CoordinateHelper from .transforms import WCSPixel2WorldTransform from .utils import coord_type_from_ctype from .frame import RectangularFrame from .coordinate_range import find_coordinate_range class CoordinatesMap: """ A container for coordinate helpers that represents a coordinate system. This object can be used to access coordinate helpers by index (like a list) or by name (like a dictionary). Parameters ---------- axes : :class:`~astropy.visualization.wcsaxes.WCSAxes` The axes the coordinate map belongs to. wcs : :class:`~astropy.wcs.WCS`, optional The WCS for the data. If this is specified, ``transform`` cannot be specified. transform : `~matplotlib.transforms.Transform`, optional The transform for the data. If this is specified, ``wcs`` cannot be specified. coord_meta : dict, optional A dictionary providing additional metadata when ``transform`` is specified. This should include the keys ``type``, ``wrap``, and ``unit``. Each of these should be a list with as many items as the dimension of the WCS. The ``type`` entries should be one of ``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should give, for the longitude, the angle at which the coordinate wraps (and `None` otherwise), and the ``unit`` should give the unit of the coordinates as :class:`~astropy.units.Unit` instances. slice : tuple, optional For WCS transformations with more than two dimensions, we need to choose which dimensions are being shown in the 2D image. The slice should contain one ``x`` entry, one ``y`` entry, and the rest of the values should be integers indicating the slice through the data. The order of the items in the slice should be the same as the order of the dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means that the first WCS dimension (last Numpy dimension) will be sliced at an index of 50, the second WCS and Numpy dimension will be shown on the x axis, and the final WCS dimension (first Numpy dimension) will be shown on the y-axis (and therefore the data will be plotted using ``data[:, :, 50].transpose()``) frame_class : type, optional The class for the frame, which should be a subclass of :class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a :class:`~astropy.visualization.wcsaxes.frame.RectangularFrame` previous_frame_path : `~matplotlib.path.Path`, optional When changing the WCS of the axes, the frame instance will change but we might want to keep re-using the same underlying matplotlib `~matplotlib.path.Path` - in that case, this can be passed to this keyword argument. 
""" def __init__(self, axes, wcs=None, transform=None, coord_meta=None, slice=None, frame_class=RectangularFrame, previous_frame_path=None): # Keep track of parent axes and WCS self._axes = axes if wcs is None: if transform is None: raise ValueError("Either `wcs` or `transform` are required") if coord_meta is None: raise ValueError("`coord_meta` is required when " "`transform` is passed") self._transform = transform naxis = 2 else: if transform is not None: raise ValueError("Cannot specify both `wcs` and `transform`") if coord_meta is not None: raise ValueError("Cannot pass `coord_meta` if passing `wcs`") self._transform = WCSPixel2WorldTransform(wcs, slice=slice) naxis = wcs.wcs.naxis self.frame = frame_class(axes, self._transform, path=previous_frame_path) # Set up coordinates self._coords = [] self._aliases = {} for coord_index in range(naxis): # Extract coordinate metadata from WCS object or transform if wcs is not None: coord_type, coord_wrap = coord_type_from_ctype(wcs.wcs.ctype[coord_index]) coord_unit = wcs.wcs.cunit[coord_index] name = wcs.wcs.ctype[coord_index][:4].replace('-', '') else: try: coord_type = coord_meta['type'][coord_index] coord_wrap = coord_meta['wrap'][coord_index] coord_unit = coord_meta['unit'][coord_index] name = coord_meta['name'][coord_index] except IndexError: raise ValueError("coord_meta items should have a length of {0}".format(len(wcs.wcs.naxis))) self._coords.append(CoordinateHelper(parent_axes=axes, parent_map=self, transform=self._transform, coord_index=coord_index, coord_type=coord_type, coord_wrap=coord_wrap, coord_unit=coord_unit, frame=self.frame)) # Set up aliases for coordinates self._aliases[name.lower()] = coord_index def __getitem__(self, item): if isinstance(item, str): return self._coords[self._aliases[item.lower()]] else: return self._coords[item] def set_visible(self, visibility): raise NotImplementedError() def __iter__(self): for coord in self._coords: yield coord def grid(self, draw_grid=True, grid_type='lines', **kwargs): """ Plot gridlines for both coordinates. Standard matplotlib appearance options (color, alpha, etc.) can be passed as keyword arguments. Parameters ---------- draw_grid : bool Whether to show the gridlines grid_type : { 'lines' | 'contours' } Whether to plot the contours by determining the grid lines in world coordinates and then plotting them in world coordinates (``'lines'``) or by determining the world coordinates at many positions in the image and then drawing contours (``'contours'``). The first is recommended for 2-d images, while for 3-d (or higher dimensional) cubes, the ``'contours'`` option is recommended. """ for coord in self: coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs) def get_coord_range(self): xmin, xmax = self._axes.get_xlim() ymin, ymax = self._axes.get_ylim() return find_coordinate_range(self._transform, [xmin, xmax, ymin, ymax], [coord.coord_type for coord in self], [coord.coord_unit for coord in self])
7e7d67e4a6ab7ea99f13926ebe2f51472dab3fac41715913a36da20aff29254a
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from ... import units as u from ...coordinates import BaseCoordinateFrame # Modified from axis_artist, supports astropy.units def select_step_degree(dv): # Modified from axis_artist, supports astropy.units if dv > 1. * u.arcsec: degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520] degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360] degree_units = [u.degree] * len(degree_steps_) minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45] minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30] minute_limits_ = np.array(minsec_limits_) / 60. minute_units = [u.arcmin] * len(minute_limits_) second_limits_ = np.array(minsec_limits_) / 3600. second_units = [u.arcsec] * len(second_limits_) degree_limits = np.concatenate([second_limits_, minute_limits_, degree_limits_]) degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_ degree_units = second_units + minute_units + degree_units n = degree_limits.searchsorted(dv.to(u.degree)) step = degree_steps[n] unit = degree_units[n] return step * unit else: return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec def select_step_hour(dv): if dv > 15. * u.arcsec: hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36] hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24] hour_units = [u.hourangle] * len(hour_steps_) minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45] minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30] minute_limits_ = np.array(minsec_limits_) / 60. minute_units = [15. * u.arcmin] * len(minute_limits_) second_limits_ = np.array(minsec_limits_) / 3600. second_units = [15. * u.arcsec] * len(second_limits_) hour_limits = np.concatenate([second_limits_, minute_limits_, hour_limits_]) hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_ hour_units = second_units + minute_units + hour_units n = hour_limits.searchsorted(dv.to(u.hourangle)) step = hour_steps[n] unit = hour_units[n] return step * unit else: return select_step_scalar(dv.to_value(15. * u.arcsec)) * (15. * u.arcsec) def select_step_scalar(dv): log10_dv = np.log10(dv) base = np.floor(log10_dv) frac = log10_dv - base steps = np.log10([1, 2, 5, 10]) imin = np.argmin(np.abs(frac - steps)) return 10. ** (base + steps[imin]) def get_coord_meta(frame): coord_meta = {} coord_meta['type'] = ('longitude', 'latitude') coord_meta['wrap'] = (None, None) coord_meta['unit'] = (u.deg, u.deg) from astropy.coordinates import frame_transform_graph if isinstance(frame, str): initial_frame = frame frame = frame_transform_graph.lookup_name(frame) if frame is None: raise ValueError("Unknown frame: {0}".format(initial_frame)) if not isinstance(frame, BaseCoordinateFrame): frame = frame() names = list(frame.representation_component_names.keys()) coord_meta['name'] = names[:2] return coord_meta def coord_type_from_ctype(ctype): """ Determine whether a particular WCS ctype corresponds to an angle or scalar coordinate. """ if ctype[:4] in ['RA--'] or ctype[1:4] == 'LON': return 'longitude', None elif ctype[:4] in ['HPLN']: return 'longitude', 180. elif ctype[:4] in ['DEC-', 'HPLT'] or ctype[1:4] == 'LAT': return 'latitude', None else: return 'scalar', None
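# --- Illustrative sketch (not part of the module above): how the step-selection
# helpers round a raw tick spacing to a "nice" value.  The expected outputs in
# the comments are taken from the accompanying unit tests.
if __name__ == '__main__':  # pragma: no cover
    print(select_step_degree(44 * u.deg))      # 45 deg
    print(select_step_hour(127 * u.deg))       # 8 hourangle
    print(select_step_scalar(3.3))             # 5.0
    print(coord_type_from_ctype('GLON-CAR'))   # ('longitude', None)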
1932983c893529fe7a80ed3f103bbd33b559572e0a18b5b60f901e560ef518c4
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from matplotlib.text import Text import matplotlib.transforms as mtransforms from .frame import RectangularFrame class AxisLabels(Text): def __init__(self, frame, minpad=1, *args, **kwargs): self._frame = frame super().__init__(*args, **kwargs) self.set_clip_on(True) self.set_visible_axes('all') self.set_ha('center') self.set_va('center') self._minpad = minpad self._visibility_rule = 'labels' def get_minpad(self, axis): try: return self._minpad[axis] except TypeError: return self._minpad def set_visible_axes(self, visible_axes): self._visible_axes = visible_axes def get_visible_axes(self): if self._visible_axes == 'all': return self._frame.keys() else: return [x for x in self._visible_axes if x in self._frame] def set_minpad(self, minpad): self._minpad = minpad def set_visibility_rule(self, value): allowed = ['always', 'labels', 'ticks'] if value not in allowed: raise ValueError("Axis label visibility rule must be one of{}".format(' / '.join(allowed))) self._visibility_rule = value def get_visibility_rule(self): return self._visibility_rule def draw(self, renderer, bboxes, ticklabels_bbox, coord_ticklabels_bbox, ticks_locs, visible_ticks): if not self.get_visible(): return text_size = renderer.points_to_pixels(self.get_size()) for axis in self.get_visible_axes(): # Flatten the bboxes for all coords and all axes ticklabels_bbox_list = [] for bbcoord in ticklabels_bbox.values(): for bbaxis in bbcoord.values(): ticklabels_bbox_list += bbaxis if self.get_visibility_rule() == 'ticks': if not ticks_locs[axis]: continue elif self.get_visibility_rule() == 'labels': if not coord_ticklabels_bbox: continue padding = text_size * self.get_minpad(axis) # Find position of the axis label. For now we pick the mid-point # along the path but in future we could allow this to be a # parameter. x_disp, y_disp = self._frame[axis].pixel[:, 0], self._frame[axis].pixel[:, 1] d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]) xcen = np.interp(d[-1] / 2., d, x_disp) ycen = np.interp(d[-1] / 2., d, y_disp) # Find segment along which the mid-point lies imin = np.searchsorted(d, d[-1] / 2.) - 1 # Find normal of the axis label facing outwards on that segment normal_angle = self._frame[axis].normal_angle[imin] + 180. label_angle = (normal_angle - 90.) % 360. if 135 < label_angle < 225: label_angle += 180 self.set_rotation(label_angle) # Find label position by looking at the bounding box of ticks' # labels and the image. It sets the default padding at 1 times the # axis label font size which can also be changed by setting # the minpad parameter. 
if isinstance(self._frame, RectangularFrame): if len(ticklabels_bbox_list) > 0 and ticklabels_bbox_list[0] is not None: coord_ticklabels_bbox[axis] = [mtransforms.Bbox.union(ticklabels_bbox_list)] else: coord_ticklabels_bbox[axis] = [None] if axis == 'l': if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None: left = coord_ticklabels_bbox[axis][0].xmin else: left = xcen xpos = left - padding self.set_position((xpos, ycen)) elif axis == 'r': if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None: right = coord_ticklabels_bbox[axis][0].x1 else: right = xcen xpos = right + padding self.set_position((xpos, ycen)) elif axis == 'b': if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None: bottom = coord_ticklabels_bbox[axis][0].ymin else: bottom = ycen ypos = bottom - padding self.set_position((xcen, ypos)) elif axis == 't': if axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None: top = coord_ticklabels_bbox[axis][0].y1 else: top = ycen ypos = top + padding self.set_position((xcen, ypos)) else: # arbitrary axis dx = np.cos(np.radians(normal_angle)) * (padding + text_size * 1.5) dy = np.sin(np.radians(normal_angle)) * (padding + text_size * 1.5) self.set_position((xcen + dx, ycen + dy)) super().draw(renderer) bb = super().get_window_extent(renderer) bboxes.append(bb)
2cacbdf93ebb3323c8be009e1d368750e46ce7f789d2ac3ff602ff4033205710
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import numpy as np from ... import units as u # Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta def wrap_180(values): values_new = values % 360. with np.errstate(invalid='ignore'): values_new[values_new > 180.] -= 360 return values_new def find_coordinate_range(transform, extent, coord_types, coord_units): """ Find the range of coordinates to use for ticks/grids Parameters ---------- transform : func Function to transform pixel to world coordinates. Should take two values (the pixel coordinates) and return two values (the world coordinates). extent : iterable The range of the image viewport in pixel coordinates, given as [xmin, xmax, ymin, ymax]. coord_types : list of str Whether each coordinate is a ``'longitude'``, ``'latitude'``, or ``'scalar'`` value. coord_units : list of `astropy.units.Unit` The units for each coordinate """ # Sample coordinates on a NX x NY grid. from . import conf nx = ny = conf.coordinate_range_samples x = np.linspace(extent[0], extent[1], nx + 1) y = np.linspace(extent[2], extent[3], ny + 1) xp, yp = np.meshgrid(x, y) world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose()) ranges = [] for coord_index, coord_type in enumerate(coord_types): xw = world[:, coord_index].reshape(xp.shape) if coord_type in ['longitude', 'latitude']: unit = coord_units[coord_index] xw = xw * unit.to(u.deg) # Iron out coordinates along first row wjump = xw[0, 1:] - xw[0, :-1] with np.errstate(invalid='ignore'): reset = np.abs(wjump) > 180. if np.any(reset): wjump = wjump + np.sign(wjump) * 180. wjump = 360. * (wjump / 360.).astype(int) xw[0, 1:][reset] -= wjump[reset] # Now iron out coordinates along all columns, starting with first row. wjump = xw[1:] - xw[:1] with np.errstate(invalid='ignore'): reset = np.abs(wjump) > 180. if np.any(reset): wjump = wjump + np.sign(wjump) * 180. wjump = 360. * (wjump / 360.).astype(int) xw[1:][reset] -= wjump[reset] with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) xw_min = np.nanmin(xw) xw_max = np.nanmax(xw) # Check if range is smaller when normalizing to the range 0 to 360 if coord_type in ['longitude', 'latitude']: with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) xw_min_check = np.nanmin(xw % 360.) xw_max_check = np.nanmax(xw % 360.) if xw_max_check - xw_min_check <= xw_max - xw_min < 360.: xw_min = xw_min_check xw_max = xw_max_check # Check if range is smaller when normalizing to the range -180 to 180 if coord_type in ['longitude', 'latitude']: with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) xw_min_check = np.nanmin(wrap_180(xw)) xw_max_check = np.nanmax(wrap_180(xw)) if xw_max_check - xw_min_check < 360. and xw_max - xw_min >= xw_max_check - xw_min_check: xw_min = xw_min_check xw_max = xw_max_check x_range = xw_max - xw_min if coord_type == 'longitude': if x_range > 300.: xw_min = 0. xw_max = 360 - np.spacing(360.) elif xw_min < 0.: xw_min = max(-180., xw_min - 0.1 * x_range) xw_max = min(+180., xw_max + 0.1 * x_range) else: xw_min = max(0., xw_min - 0.1 * x_range) xw_max = min(360., xw_max + 0.1 * x_range) elif coord_type == 'latitude': xw_min = max(-90., xw_min - 0.1 * x_range) xw_max = min(+90., xw_max + 0.1 * x_range) ranges.append((xw_min, xw_max)) return ranges
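# --- Illustrative sketch (not part of the module above): ``wrap_180`` maps
# angles into the (-180, 180] range, which the range-shrinking checks above
# rely on.  The sample values are made up.
if __name__ == '__main__':  # pragma: no cover
    print(wrap_180(np.array([10., 200., 355., -10.])))   # [ 10. -160.   -5.  -10.]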
adecab2487a946d1aacbcb234bf0d4eff5c2f2e13743cdf9c029206e3e79888c
# Licensed under a 3-clause BSD style license - see LICENSE.rst import abc from collections import OrderedDict import numpy as np from matplotlib.lines import Line2D, Path from matplotlib.patches import PathPatch __all__ = ['Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame'] class Spine: """ A single side of an axes. This does not need to be a straight line, but represents a 'side' when determining which part of the frame to put labels and ticks on. """ def __init__(self, parent_axes, transform): self.parent_axes = parent_axes self.transform = transform self.data = None self.pixel = None self.world = None @property def data(self): return self._data @data.setter def data(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = value self._pixel = self.parent_axes.transData.transform(self._data) self._world = self.transform.transform(self._data) self._update_normal() @property def pixel(self): return self._pixel @pixel.setter def pixel(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.parent_axes.transData.inverted().transform(self._data) self._pixel = value self._world = self.transform.transform(self._data) self._update_normal() @property def world(self): return self._world @world.setter def world(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.transform.transform(value) self._pixel = self.parent_axes.transData.transform(self._data) self._world = value self._update_normal() def _update_normal(self): # Find angle normal to border and inwards, in display coordinate dx = self.pixel[1:, 0] - self.pixel[:-1, 0] dy = self.pixel[1:, 1] - self.pixel[:-1, 1] self.normal_angle = np.degrees(np.arctan2(dx, -dy)) class BaseFrame(OrderedDict, metaclass=abc.ABCMeta): """ Base class for frames, which are collections of :class:`~astropy.visualization.wcsaxes.frame.Spine` instances. 
""" def __init__(self, parent_axes, transform, path=None): super().__init__() self.parent_axes = parent_axes self._transform = transform self._linewidth = 1 self._color = 'black' self._path = path for axis in self.spine_names: self[axis] = Spine(parent_axes, transform) @property def origin(self): ymin, ymax = self.parent_axes.get_ylim() return 'lower' if ymin < ymax else 'upper' @property def transform(self): return self._transform @transform.setter def transform(self, value): self._transform = value for axis in self: self[axis].transform = value def _update_patch_path(self): self.update_spines() x, y = [], [] for axis in self: x.append(self[axis].data[:, 0]) y.append(self[axis].data[:, 1]) vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose() if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices @property def patch(self): self._update_patch_path() return PathPatch(self._path, transform=self.parent_axes.transData, facecolor='white', edgecolor='white') def draw(self, renderer): for axis in self: x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000) line.draw(renderer) def sample(self, n_samples): self.update_spines() spines = OrderedDict() for axis in self: data = self[axis].data p = np.linspace(0., 1., data.shape[0]) p_new = np.linspace(0., 1., n_samples) spines[axis] = Spine(self.parent_axes, self.transform) spines[axis].data = np.array([np.interp(p_new, p, data[:, 0]), np.interp(p_new, p, data[:, 1])]).transpose() return spines def set_color(self, color): """ Sets the color of the frame. Parameters ---------- color : string The color of the frame. """ self._color = color def get_color(self): return self._color def set_linewidth(self, linewidth): """ Sets the linewidth of the frame. Parameters ---------- linewidth : float The linewidth of the frame in points. """ self._linewidth = linewidth def get_linewidth(self): return self._linewidth @abc.abstractmethod def update_spines(self): raise NotImplementedError("") class RectangularFrame(BaseFrame): """ A classic rectangular frame. """ spine_names = 'brtl' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() self['b'].data = np.array(([xmin, ymin], [xmax, ymin])) self['r'].data = np.array(([xmax, ymin], [xmax, ymax])) self['t'].data = np.array(([xmax, ymax], [xmin, ymax])) self['l'].data = np.array(([xmin, ymax], [xmin, ymin])) class EllipticalFrame(BaseFrame): """ An elliptical frame. """ spine_names = 'chv' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() xmid = 0.5 * (xmax + xmin) ymid = 0.5 * (ymax + ymin) dx = xmid - xmin dy = ymid - ymin theta = np.linspace(0., 2 * np.pi, 1000) self['c'].data = np.array([xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]).transpose() self['h'].data = np.array([np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]).transpose() self['v'].data = np.array([np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]).transpose() def _update_patch_path(self): """Override path patch to include only the outer ellipse, not the major and minor axes in the middle.""" self.update_spines() vertices = self['c'].data if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices def draw(self, renderer): """Override to draw only the outer ellipse, not the major and minor axes in the middle. 
FIXME: we may want to add a general method to give the user control over which spines are drawn.""" axis = 'c' x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000) line.draw(renderer)
b11d682c733b4feb412af5d578871bbc6179c4b838adb18c489aac46587b2b66
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from matplotlib.lines import Path from ...coordinates.angle_utilities import angular_separation # Tolerance for WCS round-tripping ROUND_TRIP_TOL = 1e-1 # Tolerance for discontinuities relative to the median DISCONT_FACTOR = 10. def get_lon_lat_path(lon_lat, pixel, lon_lat_check): """ Draw a curve, taking into account discontinuities. Parameters ---------- lon_lat : `~numpy.ndarray` The longitude and latitude values along the curve, given as a (n,2) array. pixel : `~numpy.ndarray` The pixel coordinates corresponding to ``lon_lat`` lon_lat_check : `~numpy.ndarray` The world coordinates derived from converting from ``pixel``, which is used to ensure round-tripping. """ # In some spherical projections, some parts of the curve are 'behind' or # 'in front of' the plane of the image, so we find those by reversing the # transformation and finding points where the result is not consistent. sep = angular_separation(np.radians(lon_lat[:, 0]), np.radians(lon_lat[:, 1]), np.radians(lon_lat_check[:, 0]), np.radians(lon_lat_check[:, 1])) with np.errstate(invalid='ignore'): sep[sep > np.pi] -= 2. * np.pi mask = np.abs(sep > ROUND_TRIP_TOL) # Mask values with invalid pixel positions mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1]) # We can now start to set up the codes for the Path. codes = np.zeros(lon_lat.shape[0], dtype=np.uint8) codes[:] = Path.LINETO codes[0] = Path.MOVETO codes[mask] = Path.MOVETO # Also need to move to point *after* a hidden value codes[1:][mask[:-1]] = Path.MOVETO # We now go through and search for discontinuities in the curve that would # be due to the curve going outside the field of view, invalid WCS values, # or due to discontinuities in the projection. # We start off by pre-computing the step in pixel coordinates from one # point to the next. The idea is to look for large jumps that might indicate # discontinuities. step = np.sqrt((pixel[1:, 0] - pixel[:-1, 0]) ** 2 + (pixel[1:, 1] - pixel[:-1, 1]) ** 2) # We search for discontinuities by looking for places where the step # is larger by more than a given factor compared to the median # discontinuous = step > DISCONT_FACTOR * np.median(step) discontinuous = step[1:] > DISCONT_FACTOR * step[:-1] # Skip over discontinuities codes[2:][discontinuous] = Path.MOVETO # The above missed the first step, so check that too if step[0] > DISCONT_FACTOR * step[1]: codes[1] = Path.MOVETO # Create the path path = Path(pixel, codes=codes) return path def get_gridline_path(world, pixel): """ Draw a grid line Parameters ---------- world : `~numpy.ndarray` The longitude and latitude values along the curve, given as a (n,2) array. pixel : `~numpy.ndarray` The pixel coordinates corresponding to ``lon_lat`` """ # Mask values with invalid pixel positions mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1]) # We can now start to set up the codes for the Path. codes = np.zeros(world.shape[0], dtype=np.uint8) codes[:] = Path.LINETO codes[0] = Path.MOVETO codes[mask] = Path.MOVETO # Also need to move to point *after* a hidden value codes[1:][mask[:-1]] = Path.MOVETO # We now go through and search for discontinuities in the curve that would # be due to the curve going outside the field of view, invalid WCS values, # or due to discontinuities in the projection. # Create the path path = Path(pixel, codes=codes) return path
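# --- Illustrative sketch (not part of the module above): ``get_gridline_path``
# turns any vertex with a NaN pixel position into a MOVETO, so the drawn grid
# line simply restarts after the invalid point.  The coordinates are made up.
if __name__ == '__main__':  # pragma: no cover
    world = np.array([[0., 0.], [10., 5.], [20., 10.], [30., 15.]])
    pixel = np.array([[0., 0.], [1., 1.], [np.nan, np.nan], [3., 3.]])
    path = get_gridline_path(world, pixel)
    print(path.codes)   # [1 2 1 1] -> MOVETO, LINETO, MOVETO, MOVETO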
c875e51ce4ac6e6e98f768f8ef02ed480fd43e707cc85f09bb040c741ae02b8b
# Licensed under a 3-clause BSD style license - see LICENSE.rst from numpy.testing import assert_allclose try: import matplotlib.pyplot as plt HAS_PLT = True except ImportError: HAS_PLT = False try: import scipy # noqa HAS_SCIPY = True except ImportError: HAS_SCIPY = False import pytest import numpy as np from .. import hist from ...stats import histogram @pytest.mark.skipif('not HAS_PLT') def test_hist_basic(rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(100) for range in [None, (-2, 2)]: n1, bins1, patches1 = plt.hist(x, 10, range=range) n2, bins2, patches2 = hist(x, 10, range=range) assert_allclose(n1, n2) assert_allclose(bins1, bins2) @pytest.mark.skipif('not HAS_PLT') def test_hist_specify_ax(rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(100) fig, ax = plt.subplots(2) n1, bins1, patches1 = hist(x, 10, ax=ax[0]) assert patches1[0].axes is ax[0] n2, bins2, patches2 = hist(x, 10, ax=ax[1]) assert patches2[0].axes is ax[1] @pytest.mark.skipif('not HAS_PLT') def test_hist_autobin(rseed=0): rng = np.random.RandomState(rseed) x = rng.randn(100) # 'knuth' bintype depends on scipy that is optional dependency if HAS_SCIPY: bintypes = [10, np.arange(-3, 3, 10), 'knuth', 'scott', 'freedman', 'blocks'] else: bintypes = [10, np.arange(-3, 3, 10), 'scott', 'freedman', 'blocks'] for bintype in bintypes: for range in [None, (-3, 3)]: n1, bins1 = histogram(x, bintype, range=range) n2, bins2, patches = hist(x, bintype, range=range) assert_allclose(n1, n2) assert_allclose(bins1, bins2)
a3b99aaa0c81eccfee689d176702d45b9f96a9cdf98a53eced23a95cf926c277
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from ...utils import NumpyRNGContext from ..interval import (ManualInterval, MinMaxInterval, PercentileInterval, AsymmetricPercentileInterval, ZScaleInterval) class TestInterval: data = np.linspace(-20., 60., 100) def test_manual(self): interval = ManualInterval(-10., +15.) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -10.) np.testing.assert_allclose(vmax, +15.) def test_manual_defaults(self): interval = ManualInterval(vmin=-10.) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -10.) np.testing.assert_allclose(vmax, np.max(self.data)) interval = ManualInterval(vmax=15.) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, np.min(self.data)) np.testing.assert_allclose(vmax, 15.) def test_manual_zero_limit(self): # Regression test for a bug that caused ManualInterval to compute the # limit (min or max) if it was set to zero. interval = ManualInterval(vmin=0, vmax=0) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, 0) np.testing.assert_allclose(vmax, 0) def test_manual_defaults_with_nan(self): interval = ManualInterval() data = np.copy(self.data) data[0] = np.nan vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -20) np.testing.assert_allclose(vmax, +60) def test_minmax(self): interval = MinMaxInterval() vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -20.) np.testing.assert_allclose(vmax, +60.) def test_percentile(self): interval = PercentileInterval(62.2) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -4.88) np.testing.assert_allclose(vmax, 44.88) def test_asymmetric_percentile(self): interval = AsymmetricPercentileInterval(10.5, 70.5) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -11.6) np.testing.assert_allclose(vmax, 36.4) def test_asymmetric_percentile_nsamples(self): with NumpyRNGContext(12345): interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20) vmin, vmax = interval.get_limits(self.data) np.testing.assert_allclose(vmin, -14.367676767676768) np.testing.assert_allclose(vmax, 40.266666666666666) class TestIntervalList(TestInterval): # Make sure intervals work with lists data = np.linspace(-20., 60., 100).tolist() class TestInterval2D(TestInterval): # Make sure intervals work with 2d arrays data = np.linspace(-20., 60., 100).reshape(100, 1) def test_zscale(): np.random.seed(42) data = np.random.randn(100, 100) * 5 + 10 interval = ZScaleInterval() vmin, vmax = interval.get_limits(data) np.testing.assert_allclose(vmin, -9.6, atol=0.1) np.testing.assert_allclose(vmax, 25.4, atol=0.1) data = list(range(1000)) + [np.nan] interval = ZScaleInterval() vmin, vmax = interval.get_limits(data) np.testing.assert_allclose(vmin, 0, atol=0.1) np.testing.assert_allclose(vmax, 999, atol=0.1) data = list(range(100)) interval = ZScaleInterval() vmin, vmax = interval.get_limits(data) np.testing.assert_allclose(vmin, 0, atol=0.1) np.testing.assert_allclose(vmax, 99, atol=0.1) def test_integers(): # Need to make sure integers get cast to float interval = MinMaxInterval() values = interval([1, 3, 4, 5, 6]) np.testing.assert_allclose(values, [0., 0.4, 0.6, 0.8, 1.0]) # Don't accept integer array in output out = np.zeros(5, dtype=int) with pytest.raises(TypeError) as exc: values = interval([1, 3, 4, 5, 6], out=out) assert exc.value.args[0] == ("Can only do in-place scaling for " 
"floating-point arrays") # But integer input and floating point output is fine out = np.zeros(5, dtype=float) interval([1, 3, 4, 5, 6], out=out) np.testing.assert_allclose(out, [0., 0.4, 0.6, 0.8, 1.0]) def test_constant_data(): """Test intervals with constant data (avoiding divide-by-zero).""" shape = (10, 10) data = np.ones(shape) interval = MinMaxInterval() limits = interval.get_limits(data) values = interval(data) np.testing.assert_allclose(limits, (1., 1.)) np.testing.assert_allclose(values, np.zeros(shape))
843e948928d58ca746724f8ab894b8650b5b3774f10233f59328a63a6f71ad90
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from ..stretch import (LinearStretch, SqrtStretch, PowerStretch, PowerDistStretch, SquaredStretch, LogStretch, AsinhStretch, SinhStretch, HistEqStretch, ContrastBiasStretch) DATA = np.array([0.00, 0.25, 0.50, 0.75, 1.00]) RESULTS = {} RESULTS[LinearStretch()] = np.array([0.00, 0.25, 0.50, 0.75, 1.00]) RESULTS[SqrtStretch()] = np.array([0., 0.5, 0.70710678, 0.8660254, 1.]) RESULTS[SquaredStretch()] = np.array([0., 0.0625, 0.25, 0.5625, 1.]) RESULTS[PowerStretch(0.5)] = np.array([0., 0.5, 0.70710678, 0.8660254, 1.]) RESULTS[PowerDistStretch()] = np.array([0., 0.004628, 0.030653, 0.177005, 1.]) RESULTS[LogStretch()] = np.array([0., 0.799776, 0.899816, 0.958408, 1.]) RESULTS[AsinhStretch()] = np.array([0., 0.549402, 0.77127, 0.904691, 1.]) RESULTS[SinhStretch()] = np.array([0., 0.082085, 0.212548, 0.46828, 1.]) RESULTS[ContrastBiasStretch(contrast=2., bias=0.4)] = np.array([-0.3, 0.2, 0.7, 1.2, 1.7]) RESULTS[HistEqStretch(DATA)] = DATA RESULTS[HistEqStretch(DATA[::-1])] = DATA RESULTS[HistEqStretch(DATA ** 0.5)] = np.array([0., 0.125, 0.25, 0.5674767, 1.]) class TestStretch: @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_no_clip(self, stretch): np.testing.assert_allclose(stretch(DATA, clip=False), RESULTS[stretch], atol=1.e-6) @pytest.mark.parametrize('ndim', [2, 3]) @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_clip_ndimensional(self, stretch, ndim): new_shape = DATA.shape + (1,) * ndim np.testing.assert_allclose(stretch(DATA.reshape(new_shape), clip=True).ravel(), np.clip(RESULTS[stretch], 0., 1), atol=1.e-6) @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_clip(self, stretch): np.testing.assert_allclose(stretch(DATA, clip=True), np.clip(RESULTS[stretch], 0., 1), atol=1.e-6) @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_inplace(self, stretch): data_in = DATA.copy() result = np.zeros(DATA.shape) stretch(data_in, out=result, clip=False) np.testing.assert_allclose(result, RESULTS[stretch], atol=1.e-6) np.testing.assert_allclose(data_in, DATA) @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_round_trip(self, stretch): np.testing.assert_allclose(stretch.inverse(stretch(DATA, clip=False), clip=False), DATA) @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_inplace_roundtrip(self, stretch): result = np.zeros(DATA.shape) stretch(DATA, out=result, clip=False) stretch.inverse(result, out=result, clip=False) np.testing.assert_allclose(result, DATA) @pytest.mark.parametrize('stretch', RESULTS.keys()) def test_double_inverse(self, stretch): np.testing.assert_allclose(stretch.inverse.inverse(DATA), stretch(DATA), atol=1.e-6) def test_inverted(self): stretch_1 = SqrtStretch().inverse stretch_2 = PowerStretch(2) np.testing.assert_allclose(stretch_1(DATA), stretch_2(DATA)) def test_chaining(self): stretch_1 = SqrtStretch() + SqrtStretch() stretch_2 = PowerStretch(0.25) stretch_3 = PowerStretch(4.) np.testing.assert_allclose(stretch_1(DATA), stretch_2(DATA)) np.testing.assert_allclose(stretch_1.inverse(DATA), stretch_3(DATA)) def test_clip_invalid(): stretch = SqrtStretch() values = stretch([-1., 0., 0.5, 1., 1.5]) np.testing.assert_allclose(values, [0., 0., 0.70710678, 1., 1.]) values = stretch([-1., 0., 0.5, 1., 1.5], clip=False) np.testing.assert_allclose(values, [np.nan, 0., 0.70710678, 1., 1.2247448])
b402127848afbd6c3cee827c4ea01c431f81492bf61098763d392ff71b7b25c7
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import io import pytest try: import matplotlib.pyplot as plt except ImportError: HAS_PLT = False else: HAS_PLT = True from ... import units as u from ..units import quantity_support @pytest.mark.skipif('not HAS_PLT') def test_units(): plt.figure() with quantity_support(): buff = io.BytesIO() plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg) plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g) # Also test fill_between, which requires actual conversion to ndarray # with numpy >=1.10 (#4654). plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g) plt.savefig(buff, format='svg') assert plt.gca().xaxis.get_units() == u.m assert plt.gca().yaxis.get_units() == u.kg plt.clf() @pytest.mark.skipif('not HAS_PLT') def test_incompatible_units(): plt.figure() with quantity_support(): plt.plot([1, 2, 3] * u.m) with pytest.raises(u.UnitConversionError): plt.plot([105, 210, 315] * u.kg) plt.clf()
af0343f017d8a9a37e8f266e4b9726eed9bf07016887173533a90ed46fddca53
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests for RGB Images """ import sys import os import tempfile import pytest import numpy as np from numpy.testing import assert_equal from ...convolution import convolve, Gaussian2DKernel from .. import lupton_rgb try: import matplotlib # noqa HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False # Set display=True to get matplotlib imshow windows to help with debugging. display = False def display_rgb(rgb, title=None): """Display an rgb image using matplotlib (useful for debugging)""" import matplotlib.pyplot as plt plt.imshow(rgb, interpolation='nearest', origin='lower') if title: plt.title(title) plt.show() return plt def saturate(image, satValue): """ Return image with all points above satValue set to NaN. Simulates saturation on an image, so we can test 'replace_saturated_pixels' """ result = image.copy() saturated = image > satValue result[saturated] = np.nan return result def random_array(dtype, N=100): return np.array(np.random.random(10)*100, dtype=dtype) def test_compute_intensity_1_float(): image_r = random_array(np.float64) intensity = lupton_rgb.compute_intensity(image_r) assert image_r.dtype == intensity.dtype assert_equal(image_r, intensity) def test_compute_intensity_1_uint(): image_r = random_array(np.uint8) intensity = lupton_rgb.compute_intensity(image_r) assert image_r.dtype == intensity.dtype assert_equal(image_r, intensity) def test_compute_intensity_3_float(): image_r = random_array(np.float64) image_g = random_array(np.float64) image_b = random_array(np.float64) intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b) assert image_r.dtype == intensity.dtype assert_equal(intensity, (image_r+image_g+image_b)/3.0) def test_compute_intensity_3_uint(): image_r = random_array(np.uint8) image_g = random_array(np.uint8) image_b = random_array(np.uint8) intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b) assert image_r.dtype == intensity.dtype assert_equal(intensity, (image_r+image_g+image_b)//3) class TestLuptonRgb: """A test case for Rgb""" def setup_method(self, method): np.random.seed(1000) # so we always get the same images. self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh width, height = 85, 75 self.width = width self.height = height shape = (width, height) image_r = np.zeros(shape) image_g = np.zeros(shape) image_b = np.zeros(shape) # pixel locations, values and colors points = [[15, 15], [50, 45], [30, 30], [45, 15]] values = [1000, 5500, 600, 20000] g_r = [1.0, -1.0, 1.0, 1.0] r_i = [2.0, -0.5, 2.5, 1.0] # Put pixels in the images. 
for p, v, gr, ri in zip(points, values, g_r, r_i): image_r[p[0], p[1]] = v*pow(10, 0.4*ri) image_g[p[0], p[1]] = v*pow(10, 0.4*gr) image_b[p[0], p[1]] = v # convolve the image with a reasonable PSF, and add Gaussian background noise def convolve_with_noise(image, psf): convolvedImage = convolve(image, psf, boundary='extend', normalize_kernel=True) randomImage = np.random.normal(0, 2, image.shape) return randomImage + convolvedImage psf = Gaussian2DKernel(2.5) self.image_r = convolve_with_noise(image_r, psf) self.image_g = convolve_with_noise(image_g, psf) self.image_b = convolve_with_noise(image_b, psf) def test_Asinh(self): """Test creating an RGB image using an asinh stretch""" asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q) rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b) if display: display_rgb(rgbImage, title=sys._getframe().f_code.co_name) def test_AsinhZscale(self): """Test creating an RGB image using an asinh stretch estimated using zscale""" map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b) rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b) if display: display_rgb(rgbImage, title=sys._getframe().f_code.co_name) def test_AsinhZscaleIntensity(self): """Test creating an RGB image using an asinh stretch estimated using zscale on the intensity""" map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b) rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b) if display: display_rgb(rgbImage, title=sys._getframe().f_code.co_name) def test_AsinhZscaleIntensityPedestal(self): """Test creating an RGB image using an asinh stretch estimated using zscale on the intensity where the images each have a pedestal added""" pedestal = [100, 400, -400] self.image_r += pedestal[0] self.image_g += pedestal[1] self.image_b += pedestal[2] map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b, pedestal=pedestal) rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b) if display: display_rgb(rgbImage, title=sys._getframe().f_code.co_name) def test_AsinhZscaleIntensityBW(self): """Test creating a black-and-white image using an asinh stretch estimated using zscale on the intensity""" map = lupton_rgb.AsinhZScaleMapping(self.image_r) rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r) if display: display_rgb(rgbImage, title=sys._getframe().f_code.co_name) @pytest.mark.skipif('not HAS_MATPLOTLIB') def test_make_rgb(self): """Test the function that does it all""" satValue = 1000.0 with tempfile.NamedTemporaryFile(suffix=".png") as temp: red = saturate(self.image_r, satValue) green = saturate(self.image_g, satValue) blue = saturate(self.image_b, satValue) lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q, filename=temp) assert os.path.exists(temp.name) def test_make_rgb_saturated_fix(self): pytest.skip('saturation correction is not implemented') satValue = 1000.0 # TODO: Cannot test with these options yet, as that part of the code is not implemented. 
        with tempfile.NamedTemporaryFile(suffix=".png") as temp:
            red = saturate(self.image_r, satValue)
            green = saturate(self.image_g, satValue)
            blue = saturate(self.image_b, satValue)
            lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q,
                                       saturated_border_width=1, saturated_pixel_value=2000,
                                       filename=temp)

    def test_linear(self):
        """Test using a specified linear stretch"""
        map = lupton_rgb.LinearMapping(-8.45, 13.44)
        rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
        if display:
            display_rgb(rgbImage, title=sys._getframe().f_code.co_name)

    def test_linear_min_max(self):
        """Test using a min/max linear stretch determined from one image"""
        map = lupton_rgb.LinearMapping(image=self.image_b)
        rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
        if display:
            display_rgb(rgbImage, title=sys._getframe().f_code.co_name)

    def test_saturated(self):
        """Test interpolating saturated pixels"""
        pytest.skip('replaceSaturatedPixels is not implemented in astropy yet')

        satValue = 1000.0
        self.image_r = saturate(self.image_r, satValue)
        self.image_g = saturate(self.image_g, satValue)
        self.image_b = saturate(self.image_b, satValue)
        lupton_rgb.replaceSaturatedPixels(self.image_r, self.image_g, self.image_b, 1, 2000)
        # Check that we replaced those NaNs with some reasonable value
        assert np.isfinite(self.image_r.getImage().getArray()).all()
        assert np.isfinite(self.image_g.getImage().getArray()).all()
        assert np.isfinite(self.image_b.getImage().getArray()).all()

        # Prepare for generating an output file
        self.imagesR = self.imagesR.getImage()
        self.imagesG = self.imagesG.getImage()
        self.imagesB = self.imagesB.getImage()

        asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
        rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
        if display:
            display_rgb(rgbImage, title=sys._getframe().f_code.co_name)

    def test_different_shapes_asserts(self):
        with pytest.raises(ValueError) as excinfo:
            # just swap the dimensions to get a differently-shaped 'r'
            image_r = self.image_r.reshape(self.height, self.width)
            lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
        assert "shapes must match" in str(excinfo.value)
218d88373f72a064535ca2a7eba7c8fab50bfe20a4cb098591c066b431beef48
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy import ma from numpy.testing import assert_allclose from ..mpl_normalize import ImageNormalize, simple_norm from ..interval import ManualInterval from ..stretch import SqrtStretch try: import matplotlib # pylint: disable=W0611 HAS_MATPLOTLIB = True except ImportError: HAS_MATPLOTLIB = False DATA = np.linspace(0., 15., 6) DATA2 = np.arange(3) DATA2SCL = 0.5 * DATA2 @pytest.mark.skipif('HAS_MATPLOTLIB') def test_normalize_error_message(): with pytest.raises(ImportError) as exc: ImageNormalize() assert (exc.value.args[0] == "matplotlib is required in order to use " "this class.") @pytest.mark.skipif('not HAS_MATPLOTLIB') class TestNormalize: def test_invalid_interval(self): with pytest.raises(TypeError): ImageNormalize(vmin=2., vmax=10., interval=ManualInterval, clip=True) def test_invalid_stretch(self): with pytest.raises(TypeError): ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch, clip=True) def test_scalar(self): norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(), clip=True) norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True) assert_allclose(norm(6), 0.70710678) assert_allclose(norm(6), norm2(6)) def test_clip(self): norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(), clip=True) norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True) output = norm(DATA) expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.] assert_allclose(output, expected) assert_allclose(output.mask, [0, 0, 0, 0, 0, 0]) assert_allclose(output, norm2(DATA)) def test_noclip(self): norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(), clip=False) norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=False) output = norm(DATA) expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399, 1.27475488] assert_allclose(output, expected) assert_allclose(output.mask, [0, 0, 0, 0, 0, 0]) assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:]) assert_allclose(output, norm2(DATA)) def test_implicit_autoscale(self): norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(), clip=False) norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10), stretch=SqrtStretch(), clip=False) output = norm(DATA) assert norm.vmin == np.min(DATA) assert norm.vmax == 10. assert_allclose(output, norm2(DATA)) norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(), clip=False) norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None), stretch=SqrtStretch(), clip=False) output = norm(DATA) assert norm.vmin == 2. assert norm.vmax == np.max(DATA) assert_allclose(output, norm2(DATA)) def test_masked_clip(self): mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0]) norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(), clip=True) norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True) output = norm(mdata) expected = [0., 0.35355339, 1., 0.93541435, 1., 1.] 
assert_allclose(output.filled(-10), expected) assert_allclose(output.mask, [0, 0, 0, 0, 0, 0]) assert_allclose(output, norm2(mdata)) def test_masked_noclip(self): mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0]) norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(), clip=False) norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=False) output = norm(mdata) expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399, 1.27475488] assert_allclose(output.filled(-10), expected) assert_allclose(output.mask, [0, 0, 1, 0, 0, 0]) assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:]) assert_allclose(output, norm2(mdata)) @pytest.mark.skipif('not HAS_MATPLOTLIB') class TestImageScaling: def test_linear(self): """Test linear scaling.""" norm = simple_norm(DATA2, stretch='linear') assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5) def test_sqrt(self): """Test sqrt scaling.""" norm = simple_norm(DATA2, stretch='sqrt') assert_allclose(norm(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5) def test_power(self): """Test power scaling.""" power = 3.0 norm = simple_norm(DATA2, stretch='power', power=power) assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5) def test_log(self): """Test log10 scaling.""" norm = simple_norm(DATA2, stretch='log') ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0) assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5) def test_asinh(self): """Test asinh scaling.""" a = 0.1 norm = simple_norm(DATA2, stretch='asinh', asinh_a=a) ref = np.arcsinh(DATA2SCL / a) / np.arcsinh(1. / a) assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5) def test_min(self): """Test linear scaling.""" norm = simple_norm(DATA2, stretch='linear', min_cut=1.) assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5) def test_percent(self): """Test percent keywords.""" norm = simple_norm(DATA2, stretch='linear', percent=99.) assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5) norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5, max_percent=99.5) assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5) def test_invalid_stretch(self): """Test invalid stretch keyword.""" with pytest.raises(ValueError): simple_norm(DATA2, stretch='invalid')
176cb7acd49aa02bac33dc90cb1a3ae6f66dddcf6b363229b8998225d966d8bc
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from ....io import fits try: import matplotlib # pylint: disable=W0611 import matplotlib.image as mpimg HAS_MATPLOTLIB = True from ..fits2bitmap import fits2bitmap, main except ImportError: HAS_MATPLOTLIB = False @pytest.mark.skipif('not HAS_MATPLOTLIB') class TestFits2Bitmap: def setup_class(self): self.filename = 'test.fits' def test_function(self, tmpdir): filename = tmpdir.join(self.filename).strpath fits.writeto(filename, np.ones((128, 128))) fits2bitmap(filename) def test_script(self, tmpdir): filename = tmpdir.join(self.filename).strpath fits.writeto(filename, np.ones((128, 128))) main([filename, '-e', '0']) def test_exten_num(self, tmpdir): filename = tmpdir.join(self.filename).strpath data = np.ones((100, 100)) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(data) hdulist = fits.HDUList([hdu1, hdu2]) hdulist.writeto(filename) main([filename, '-e', '1']) def test_exten_name(self, tmpdir): filename = tmpdir.join(self.filename).strpath data = np.ones((100, 100)) hdu1 = fits.PrimaryHDU() extname = 'SCI' hdu2 = fits.ImageHDU(data) hdu2.header['EXTNAME'] = extname hdulist = fits.HDUList([hdu1, hdu2]) hdulist.writeto(filename) main([filename, '-e', extname]) @pytest.mark.parametrize('file_exten', ['.gz', '.bz2']) def test_compressed_fits(self, tmpdir, file_exten): filename = tmpdir.join('test.fits' + file_exten).strpath fits.writeto(filename, np.ones((128, 128))) main([filename, '-e', '0']) def test_orientation(self, tmpdir): """ Regression test to check the image vertical orientation/origin. """ filename = tmpdir.join(self.filename).strpath out_filename = 'fits2bitmap_test.png' out_filename = tmpdir.join(out_filename).strpath data = np.zeros((32, 32)) data[0:16, :] = 1. fits.writeto(filename, data) main([filename, '-e', '0', '-o', out_filename]) img = mpimg.imread(out_filename) assert img[0, 0, 0] == 0 assert img[31, 31, 0] == 1
bf708267e02a32c030f58d4931651288d395fae3134727ab0d8f59bc902de6c8
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import warnings import pytest import numpy as np import matplotlib.pyplot as plt from .... import units as u from ....wcs import WCS from ....io import fits from ....coordinates import SkyCoord from ....tests.helper import catch_warnings from ....tests.image_tests import ignore_matplotlibrc from ..core import WCSAxes from ..utils import get_coord_meta DATA = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data')) @ignore_matplotlibrc def test_grid_regression(): # Regression test for a bug that meant that if the rc parameter # axes.grid was set to True, WCSAxes would crash upon initalization. plt.rc('axes', grid=True) fig = plt.figure(figsize=(3, 3)) WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) @ignore_matplotlibrc def test_format_coord_regression(tmpdir): # Regression test for a bug that meant that if format_coord was called by # Matplotlib before the axes were drawn, an error occurred. fig = plt.figure(figsize=(3, 3)) ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) assert ax.format_coord(10, 10) == "" assert ax.coords[0].format_coord(10) == "" assert ax.coords[1].format_coord(10) == "" fig.savefig(tmpdir.join('nothing').strpath) assert ax.format_coord(10, 10) == "10.0 10.0 (world)" assert ax.coords[0].format_coord(10) == "10.0" assert ax.coords[1].format_coord(10) == "10.0" TARGET_HEADER = fits.Header.fromstring(""" NAXIS = 2 NAXIS1 = 200 NAXIS2 = 100 CTYPE1 = 'RA---MOL' CRPIX1 = 500 CRVAL1 = 180.0 CDELT1 = -0.4 CUNIT1 = 'deg ' CTYPE2 = 'DEC--MOL' CRPIX2 = 400 CRVAL2 = 0.0 CDELT2 = 0.4 CUNIT2 = 'deg ' COORDSYS= 'icrs ' """, sep='\n') @ignore_matplotlibrc def test_no_numpy_warnings(tmpdir): # Make sure that no warnings are raised if some pixels are outside WCS # (since this is normal) ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER)) ax.imshow(np.zeros((100, 200))) ax.coords.grid(color='white') with catch_warnings(RuntimeWarning) as ws: plt.savefig(tmpdir.join('test.png').strpath) # For debugging for w in ws: print(w) assert len(ws) == 0 @ignore_matplotlibrc def test_invalid_frame_overlay(): # Make sure a nice error is returned if a frame doesn't exist ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER)) with pytest.raises(ValueError) as exc: ax.get_coords_overlay('banana') assert exc.value.args[0] == 'Unknown frame: banana' with pytest.raises(ValueError) as exc: get_coord_meta('banana') assert exc.value.args[0] == 'Unknown frame: banana' @ignore_matplotlibrc def test_plot_coord_transform(): twoMASS_k_header = os.path.join(DATA, '2MASS_k_header') twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header) fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect='equal') ax.set_xlim(-0.5, 720.5) ax.set_ylim(-0.5, 720.5) c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg) with pytest.raises(TypeError): ax.plot_coord(c, 'o', transform=ax.get_transform('galactic')) @ignore_matplotlibrc def test_set_label_properties(): # Regression test to make sure that arguments passed to # set_xlabel/set_ylabel are passed to the underlying coordinate helpers ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER)) ax.set_xlabel('Test x label', labelpad=2, color='red') ax.set_ylabel('Test y label', labelpad=3, color='green') assert ax.coords[0].axislabels.get_text() == 'Test x label' assert ax.coords[0].axislabels.get_minpad('b') == 2 assert ax.coords[0].axislabels.get_color() == 'red' assert ax.coords[1].axislabels.get_text() == 'Test y label' assert 
ax.coords[1].axislabels.get_minpad('l') == 3 assert ax.coords[1].axislabels.get_color() == 'green' GAL_HEADER = fits.Header.fromstring(""" SIMPLE = T / conforms to FITS standard BITPIX = -32 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 31 NAXIS2 = 2881 NAXIS3 = 480 EXTEND = T CTYPE1 = 'DISTMOD ' CRVAL1 = 3.5 CDELT1 = 0.5 CRPIX1 = 1.0 CTYPE2 = 'GLON-CAR' CRVAL2 = 180.0 CDELT2 = -0.125 CRPIX2 = 1.0 CTYPE3 = 'GLAT-CAR' CRVAL3 = 0.0 CDELT3 = 0.125 CRPIX3 = 241.0 """, sep='\n') @ignore_matplotlibrc def test_slicing_warnings(tmpdir): # Regression test to make sure that no warnings are emitted by the tick # locator for the sliced axis when slicing a cube. # Scalar case wcs3d = WCS(naxis=3) wcs3d.wcs.ctype = ['x', 'y', 'z'] wcs3d.wcs.cunit = ['deg', 'deg', 'km/s'] wcs3d.wcs.crpix = [614.5, 856.5, 333] wcs3d.wcs.cdelt = [6.25, 6.25, 23] wcs3d.wcs.crval = [0., 0., 1.] with warnings.catch_warnings(record=True) as warning_lines: warnings.resetwarnings() plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1)) plt.savefig(tmpdir.join('test.png').strpath) # For easy debugging if there are indeed warnings for warning in warning_lines: print(warning) assert len(warning_lines) == 0 # Angle case wcs3d = WCS(GAL_HEADER) with warnings.catch_warnings(record=True) as warning_lines: warnings.resetwarnings() plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2)) plt.savefig(tmpdir.join('test.png').strpath) # For easy debugging if there are indeed warnings for warning in warning_lines: print(warning) assert len(warning_lines) == 0
c4a53b0ee436381b7f30d426507c535faddcf20fbfea4924b5bfcb59f4f54ca6
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from numpy.testing import assert_almost_equal

from .... import units as u
from ..utils import (select_step_degree, select_step_hour, select_step_scalar,
                     coord_type_from_ctype)
from ....tests.helper import (assert_quantity_allclose as
                              assert_almost_equal_quantity)


def test_select_step_degree():
    assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180. * u.deg)
    assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45. * u.deg)
    assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
    assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
    assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
    assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
    assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec)
    assert_almost_equal_quantity(select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec)


def test_select_step_hour():
    assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8. * u.hourangle)
    assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3. * u.hourangle)
    assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
    assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
    assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
    assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
    assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3. * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec)
    assert_almost_equal_quantity(select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec)


def test_select_step_scalar():
    assert_almost_equal(select_step_scalar(33122.), 50000.)
    assert_almost_equal(select_step_scalar(433.), 500.)
    assert_almost_equal(select_step_scalar(12.3), 10)
    assert_almost_equal(select_step_scalar(3.3), 5.)
    assert_almost_equal(select_step_scalar(0.66), 0.5)
    assert_almost_equal(select_step_scalar(0.0877), 0.1)
    assert_almost_equal(select_step_scalar(0.00577), 0.005)
    assert_almost_equal(select_step_scalar(0.00022), 0.0002)
    assert_almost_equal(select_step_scalar(0.000012), 0.00001)
    assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)


def test_coord_type_from_ctype():
    assert coord_type_from_ctype(' LON') == ('longitude', None)
    assert coord_type_from_ctype(' LAT') == ('latitude', None)
    assert coord_type_from_ctype('HPLN') == ('longitude', 180.)
    assert coord_type_from_ctype('HPLT') == ('latitude', None)
    assert coord_type_from_ctype('RA--') == ('longitude', None)
    assert coord_type_from_ctype('DEC-') == ('latitude', None)
    assert coord_type_from_ctype('spam') == ('scalar', None)
4a919dde900f23fe0ec43e4068d5e3e5eaff32edacb505fee6ec54d1fb4fbbc9
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# This sub-package makes use of image testing with the pytest-mpl package:
#
# https://pypi.python.org/pypi/pytest-mpl
#
# For more information on writing image tests, see the 'Image tests with
# pytest-mpl' section of the developer docs.
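# For orientation, the pattern used by the test modules in this sub-package
# looks roughly like the commented-out sketch below (kept as a comment so
# that nothing heavy is imported or collected from this __init__).  The test
# name and the bare 2-axis WCS are hypothetical placeholders; real tests
# build their WCS from the FITS headers in the ``data`` directory and
# compare the returned figure against baseline images in IMAGE_REFERENCE_DIR.
#
#     import matplotlib.pyplot as plt
#     import pytest
#
#     from astropy.wcs import WCS
#     from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
#
#     @pytest.mark.remote_data(source='astropy')
#     @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
#                                    filename='example_plot.png',
#                                    tolerance=0, style={})
#     def test_example_plot():
#         fig = plt.figure(figsize=(4, 4))
#         ax = fig.add_axes([0.15, 0.15, 0.7, 0.7], projection=WCS(naxis=2))
#         ax.grid(color='black')
#         return fig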
2697d83cc9cec8ea8e2a9d16ef8d44d3b5bcb4fa85befe9a10026412e516ae1d
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np import matplotlib.pyplot as plt from ....wcs import WCS from .. import WCSAxes from ..frame import BaseFrame from ....tests.image_tests import IMAGE_REFERENCE_DIR from .test_images import BaseImageTests class HexagonalFrame(BaseFrame): spine_names = 'abcdef' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() ymid = 0.5 * (ymin + ymax) xmid1 = (xmin + xmax) / 4. xmid2 = (xmin + xmax) * 3. / 4. self['a'].data = np.array(([xmid1, ymin], [xmid2, ymin])) self['b'].data = np.array(([xmid2, ymin], [xmax, ymid])) self['c'].data = np.array(([xmax, ymid], [xmid2, ymax])) self['d'].data = np.array(([xmid2, ymax], [xmid1, ymax])) self['e'].data = np.array(([xmid1, ymax], [xmin, ymid])) self['f'].data = np.array(([xmin, ymid], [xmid1, ymin])) class TestFrame(BaseImageTests): @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='custom_frame.png', tolerance=0, style={}) def test_custom_frame(self): wcs = WCS(self.msx_header) fig = plt.figure(figsize=(4, 4)) ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs, frame_class=HexagonalFrame) fig.add_axes(ax) ax.coords.grid(color='white') im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2., origin='lower', cmap=plt.cm.gist_heat) minpad = {} minpad['a'] = minpad['d'] = 1 minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75 ax.coords['glon'].set_axislabel("Longitude", minpad=minpad) ax.coords['glon'].set_axislabel_position('ad') ax.coords['glat'].set_axislabel("Latitude", minpad=minpad) ax.coords['glat'].set_axislabel_position('bcef') ax.coords['glon'].set_ticklabel_position('ad') ax.coords['glat'].set_ticklabel_position('bcef') # Set limits so that no labels overlap ax.set_xlim(5.5, 100.5) ax.set_ylim(5.5, 110.5) # Clip the image to the frame im.set_clip_path(ax.coords.frame.patch) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='update_clip_path_rectangular.png', tolerance=0, style={}) def test_update_clip_path_rectangular(self, tmpdir): fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal') fig.add_axes(ax) ax.set_xlim(0., 2.) ax.set_ylim(0., 2.) # Force drawing, which freezes the clip path returned by WCSAxes fig.savefig(tmpdir.join('nothing').strpath) ax.imshow(np.zeros((12, 4))) ax.set_xlim(-0.5, 3.5) ax.set_ylim(-0.5, 11.5) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='update_clip_path_nonrectangular.png', tolerance=0, style={}) def test_update_clip_path_nonrectangular(self, tmpdir): fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal', frame_class=HexagonalFrame) fig.add_axes(ax) ax.set_xlim(0., 2.) ax.set_ylim(0., 2.) # Force drawing, which freezes the clip path returned by WCSAxes fig.savefig(tmpdir.join('nothing').strpath) ax.imshow(np.zeros((12, 4))) ax.set_xlim(-0.5, 3.5) ax.set_ylim(-0.5, 11.5) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='update_clip_path_change_wcs.png', tolerance=0, style={}) def test_update_clip_path_change_wcs(self, tmpdir): # When WCS is changed, a new frame is created, so we need to make sure # that the path is carried over to the new frame. 
fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal') fig.add_axes(ax) ax.set_xlim(0., 2.) ax.set_ylim(0., 2.) # Force drawing, which freezes the clip path returned by WCSAxes fig.savefig(tmpdir.join('nothing').strpath) ax.reset_wcs() ax.imshow(np.zeros((12, 4))) ax.set_xlim(-0.5, 3.5) ax.set_ylim(-0.5, 11.5) return fig def test_copy_frame_properties_change_wcs(self): # When WCS is changed, a new frame is created, so we need to make sure # that the color and linewidth are transferred over fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) ax.coords.frame.set_linewidth(5) ax.coords.frame.set_color('purple') ax.reset_wcs() assert ax.coords.frame.get_linewidth() == 5 assert ax.coords.frame.get_color() == 'purple'
df93cf4723d75c5cee5faea17ef9f75820da4314901d60f5cb3df3f21aa39c17
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""Downloads the FITS files that are used in image testing and for building
documentation.
"""

from ....utils.data import get_pkg_data_filename
from ....io import fits

__all__ = ['fetch_msx_hdu', 'fetch_rosat_hdu', 'fetch_twoMASS_k_hdu',
           'fetch_l1448_co_hdu', 'fetch_bolocam_hdu',
           ]


def fetch_hdu(filename):
    """
    Download a FITS file to the cache and open HDU 0.
    """
    path = get_pkg_data_filename(filename)
    return fits.open(path)[0]


def fetch_msx_hdu():
    """Fetch the MSX example dataset HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        Image HDU
    """
    return fetch_hdu('galactic_center/gc_msx_e.fits')


def fetch_rosat_hdu():
    return fetch_hdu('allsky/allsky_rosat.fits')


def fetch_twoMASS_k_hdu():
    return fetch_hdu('galactic_center/gc_2mass_k.fits')


def fetch_l1448_co_hdu():
    return fetch_hdu('l1448/l1448_13co.fits')


def fetch_bolocam_hdu():
    return fetch_hdu('galactic_center/gc_bolocam_gps.fits')
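# A minimal usage sketch, not exercised by the test suite: fetch one of the
# example HDUs above and show it on WCS-aware axes, mirroring how the image
# tests in this sub-package use these files.  It assumes network access and
# matplotlib are available; the output filename is a hypothetical example.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    from astropy.wcs import WCS

    # Download (or reuse from the cache) the MSX example image.
    hdu = fetch_msx_hdu()
    wcs = WCS(hdu.header)

    # Plot the data with the WCS projection so axes show world coordinates.
    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_axes([0.15, 0.15, 0.7, 0.7], projection=wcs)
    ax.imshow(hdu.data, origin='lower')
    ax.grid(color='white')
    fig.savefig('msx_example.png')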
3aa0617127f5c0ccd43acc2979830d12b157f842f3a6fdd301d7c362abb11e5d
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np import matplotlib.pyplot as plt from .... import units as u from ....wcs import WCS from .. import WCSAxes from .test_images import BaseImageTests from ..transforms import CurvedTransform from ....tests.image_tests import IMAGE_REFERENCE_DIR # Create fake transforms that roughly mimic a polar projection class DistanceToLonLat(CurvedTransform): def __init__(self, R=6e3): super().__init__() self.R = R def transform(self, xy): x, y = xy[:, 0], xy[:, 1] lam = np.degrees(np.arctan2(y, x)) phi = 90. - np.degrees(np.hypot(x, y) / self.R) return np.array((lam, phi)).transpose() transform_non_affine = transform def inverted(self): return LonLatToDistance(R=self.R) class LonLatToDistance(CurvedTransform): def __init__(self, R=6e3): super().__init__() self.R = R def transform(self, lamphi): lam, phi = lamphi[:, 0], lamphi[:, 1] r = np.radians(90 - phi) * self.R x = r * np.cos(np.radians(lam)) y = r * np.sin(np.radians(lam)) return np.array((x, y)).transpose() transform_non_affine = transform def inverted(self): return DistanceToLonLat(R=self.R) class TestTransformCoordMeta(BaseImageTests): @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='coords_overlay.png', tolerance=0, style={}) def test_coords_overlay(self): # Set up a simple WCS that maps pixels to non-projected distances wcs = WCS(naxis=2) wcs.wcs.ctype = ['x', 'y'] wcs.wcs.cunit = ['km', 'km'] wcs.wcs.crpix = [614.5, 856.5] wcs.wcs.cdelt = [6.25, 6.25] wcs.wcs.crval = [0., 0.] fig = plt.figure(figsize=(4, 4)) ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs) fig.add_axes(ax) s = DistanceToLonLat(R=6378.273) ax.coords['x'].set_ticklabel_position('') ax.coords['y'].set_ticklabel_position('') coord_meta = {} coord_meta['type'] = ('longitude', 'latitude') coord_meta['wrap'] = (360., None) coord_meta['unit'] = (u.deg, u.deg) coord_meta['name'] = 'lon', 'lat' overlay = ax.get_coords_overlay(s, coord_meta=coord_meta) overlay.grid(color='red') overlay['lon'].grid(color='red', linestyle='solid', alpha=0.3) overlay['lat'].grid(color='blue', linestyle='solid', alpha=0.3) overlay['lon'].set_ticklabel(size=7) overlay['lat'].set_ticklabel(size=7) overlay['lon'].set_ticklabel_position('brtl') overlay['lat'].set_ticklabel_position('brtl') overlay['lon'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True) overlay['lat'].set_ticks(spacing=10. 
* u.deg, exclude_overlapping=True) ax.set_xlim(-0.5, 1215.5) ax.set_ylim(-0.5, 1791.5) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='coords_overlay_auto_coord_meta.png', tolerance=0, style={}) def test_coords_overlay_auto_coord_meta(self): fig = plt.figure(figsize=(4, 4)) ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=WCS(self.msx_header)) fig.add_axes(ax) ax.grid(color='red', alpha=0.5, linestyle='solid') overlay = ax.get_coords_overlay('fk5') # automatically sets coord_meta overlay.grid(color='black', alpha=0.5, linestyle='solid') overlay['ra'].set_ticks(color='black') overlay['dec'].set_ticks(color='black') ax.set_xlim(-0.5, 148.5) ax.set_ylim(-0.5, 148.5) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='direct_init.png', tolerance=0, style={}) def test_direct_init(self): s = DistanceToLonLat(R=6378.273) coord_meta = {} coord_meta['type'] = ('longitude', 'latitude') coord_meta['wrap'] = (360., None) coord_meta['unit'] = (u.deg, u.deg) coord_meta['name'] = 'lon', 'lat' fig = plt.figure(figsize=(4, 4)) ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], transform=s, coord_meta=coord_meta) fig.add_axes(ax) ax.coords['lon'].grid(color='red', linestyle='solid', alpha=0.3) ax.coords['lat'].grid(color='blue', linestyle='solid', alpha=0.3) ax.coords['lon'].set_ticklabel(size=7) ax.coords['lat'].set_ticklabel(size=7) ax.coords['lon'].set_ticklabel_position('brtl') ax.coords['lat'].set_ticklabel_position('brtl') ax.coords['lon'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True) ax.coords['lat'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True) ax.set_xlim(-400., 500.) ax.set_ylim(-300., 400.) return fig
227a7fb9184ff690e3480a07a8252e3b643f1ed1ae6431c28a92f7db5858fe7a
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_almost_equal from matplotlib import rc_context from .... import units as u from ..formatter_locator import AngleFormatterLocator, ScalarFormatterLocator class TestAngleFormatterLocator: def test_no_options(self): fl = AngleFormatterLocator() assert fl.values is None assert fl.number == 5 assert fl.spacing is None def test_too_many_options(self): with pytest.raises(ValueError) as exc: AngleFormatterLocator(values=[1., 2.], number=5) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" with pytest.raises(ValueError) as exc: AngleFormatterLocator(values=[1., 2.], spacing=5. * u.deg) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" with pytest.raises(ValueError) as exc: AngleFormatterLocator(number=5, spacing=5. * u.deg) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" with pytest.raises(ValueError) as exc: AngleFormatterLocator(values=[1., 2.], number=5, spacing=5. * u.deg) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" def test_values(self): fl = AngleFormatterLocator(values=[0.1, 1., 14.] * u.degree) assert fl.values.to_value(u.degree).tolist() == [0.1, 1., 14.] assert fl.number is None assert fl.spacing is None values, spacing = fl.locator(34.3, 55.4) assert_almost_equal(values.to_value(u.degree), [0.1, 1., 14.]) def test_number(self): fl = AngleFormatterLocator(number=7) assert fl.values is None assert fl.number == 7 assert fl.spacing is None values, spacing = fl.locator(34.3, 55.4) assert_almost_equal(values.to_value(u.degree), [35., 40., 45., 50., 55.]) values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.to_value(u.degree), [34.5, 34.75, 35., 35.25, 35.5, 35.75, 36.]) fl.format = 'dd' values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.to_value(u.degree), [35., 36.]) def test_spacing(self): with pytest.raises(TypeError) as exc: AngleFormatterLocator(spacing=3.) assert exc.value.args[0] == "spacing should be an astropy.units.Quantity instance with units of angle" fl = AngleFormatterLocator(spacing=3. * u.degree) assert fl.values is None assert fl.number is None assert fl.spacing == 3. * u.degree values, spacing = fl.locator(34.3, 55.4) assert_almost_equal(values.to_value(u.degree), [36., 39., 42., 45., 48., 51., 54.]) fl.spacing = 30. * u.arcmin values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.to_value(u.degree), [34.5, 35., 35.5, 36.]) fl.format = 'dd' values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.to_value(u.degree), [35., 36.]) def test_minor_locator(self): fl = AngleFormatterLocator() values, spacing = fl.locator(34.3, 55.4) minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4) assert_almost_equal(minor_values.to_value(u.degree), [36., 37., 38., 39., 41., 42., 43., 44., 46., 47., 48., 49., 51., 52., 53., 54.]) minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4) assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5]) fl.values = [0.1, 1., 14.] 
* u.degree values, spacing = fl.locator(34.3, 36.1) minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4) assert_almost_equal(minor_values.to_value(u.degree), []) @pytest.mark.parametrize(('format', 'string'), [('dd', '15\xb0'), ('dd:mm', '15\xb024\''), ('dd:mm:ss', '15\xb023\'32"'), ('dd:mm:ss.s', '15\xb023\'32.0"'), ('dd:mm:ss.ssss', '15\xb023\'32.0316"'), ('hh', '1h'), ('hh:mm', '1h02m'), ('hh:mm:ss', '1h01m34s'), ('hh:mm:ss.s', '1h01m34.1s'), ('hh:mm:ss.ssss', '1h01m34.1354s'), ('d', '15'), ('d.d', '15.4'), ('d.dd', '15.39'), ('d.ddd', '15.392'), ('m', '924'), ('m.m', '923.5'), ('m.mm', '923.53'), ('s', '55412'), ('s.s', '55412.0'), ('s.ss', '55412.03'), ]) def test_format(self, format, string): fl = AngleFormatterLocator(number=5, format=format) assert fl.formatter([15.392231] * u.degree, None)[0] == string @pytest.mark.parametrize(('separator', 'format', 'string'), [(('deg', "'", '"'), 'dd', '15deg'), (('deg', "'", '"'), 'dd:mm', '15deg24\''), (('deg', "'", '"'), 'dd:mm:ss', '15deg23\'32"'), ((':', "-", 's'), 'dd:mm:ss.s', '15:23-32.0s'), (':', 'dd:mm:ss.s', '15:23:32.0'), ((':', ":", 's'), 'hh', '1:'), (('-', "-", 's'), 'hh:mm:ss.ssss', '1-01-34.1354s'), (('d', ":", '"'), 'd', '15'), (('d', ":", '"'), 'd.d', '15.4'), ]) def test_separator(self, separator, format, string): fl = AngleFormatterLocator(number=5, format=format) fl.sep = separator assert fl.formatter([15.392231] * u.degree, None)[0] == string def test_latex_format(self): fl = AngleFormatterLocator(number=5, format="dd:mm:ss") assert fl.formatter([15.392231] * u.degree, None)[0] == '15\xb023\'32"' with rc_context(rc={'text.usetex': True}): assert fl.formatter([15.392231] * u.degree, None)[0] == "15$^\\circ$23'32\"" @pytest.mark.parametrize(('format'), ['x.xxx', 'dd.ss', 'dd:ss', 'mdd:mm:ss']) def test_invalid_formats(self, format): fl = AngleFormatterLocator(number=5) with pytest.raises(ValueError) as exc: fl.format = format assert exc.value.args[0] == "Invalid format: " + format @pytest.mark.parametrize(('format', 'base_spacing'), [('dd', 1. * u.deg), ('dd:mm', 1. * u.arcmin), ('dd:mm:ss', 1. * u.arcsec), ('dd:mm:ss.ss', 0.01 * u.arcsec), ('hh', 15. * u.deg), ('hh:mm', 15. * u.arcmin), ('hh:mm:ss', 15. * u.arcsec), ('hh:mm:ss.ss', 0.15 * u.arcsec), ('d', 1. * u.deg), ('d.d', 0.1 * u.deg), ('d.dd', 0.01 * u.deg), ('d.ddd', 0.001 * u.deg), ('m', 1. * u.arcmin), ('m.m', 0.1 * u.arcmin), ('m.mm', 0.01 * u.arcmin), ('s', 1. * u.arcsec), ('s.s', 0.1 * u.arcsec), ('s.ss', 0.01 * u.arcsec), ]) def test_base_spacing(self, format, base_spacing): fl = AngleFormatterLocator(number=5, format=format) assert fl.base_spacing == base_spacing def test_incorrect_spacing(self): fl = AngleFormatterLocator() fl.spacing = 0.032 * u.deg fl.format = 'dd:mm:ss' assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.) @pytest.mark.parametrize(('spacing', 'string'), [(2 * u.deg, '15\xb0'), (2 * u.arcmin, '15\xb024\''), (2 * u.arcsec, '15\xb023\'32"'), (0.1 * u.arcsec, '15\xb023\'32.0"')]) def test_formatter_no_format(self, spacing, string): fl = AngleFormatterLocator() assert fl.formatter([15.392231] * u.degree, spacing)[0] == string class TestScalarFormatterLocator: def test_no_options(self): fl = ScalarFormatterLocator(unit=u.m) assert fl.values is None assert fl.number == 5 assert fl.spacing is None def test_too_many_options(self): with pytest.raises(ValueError) as exc: ScalarFormatterLocator(values=[1., 2.] 
* u.m, number=5) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" with pytest.raises(ValueError) as exc: ScalarFormatterLocator(values=[1., 2.] * u.m, spacing=5. * u.m) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" with pytest.raises(ValueError) as exc: ScalarFormatterLocator(number=5, spacing=5. * u.m) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" with pytest.raises(ValueError) as exc: ScalarFormatterLocator(values=[1., 2.] * u.m, number=5, spacing=5. * u.m) assert exc.value.args[0] == "At most one of values/number/spacing can be specifed" def test_values(self): fl = ScalarFormatterLocator(values=[0.1, 1., 14.] * u.m, unit=u.m) assert fl.values.value.tolist() == [0.1, 1., 14.] assert fl.number is None assert fl.spacing is None values, spacing = fl.locator(34.3, 55.4) assert_almost_equal(values.value, [0.1, 1., 14.]) def test_number(self): fl = ScalarFormatterLocator(number=7, unit=u.m) assert fl.values is None assert fl.number == 7 assert fl.spacing is None values, spacing = fl.locator(34.3, 55.4) assert_almost_equal(values.value, np.linspace(36., 54., 10)) values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.value, np.linspace(34.4, 36, 9)) fl.format = 'x' values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.value, [35., 36.]) def test_spacing(self): fl = ScalarFormatterLocator(spacing=3. * u.m) assert fl.values is None assert fl.number is None assert fl.spacing == 3. * u.m values, spacing = fl.locator(34.3, 55.4) assert_almost_equal(values.value, [36., 39., 42., 45., 48., 51., 54.]) fl.spacing = 0.5 * u.m values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.value, [34.5, 35., 35.5, 36.]) fl.format = 'x' values, spacing = fl.locator(34.3, 36.1) assert_almost_equal(values.value, [35., 36.]) def test_minor_locator(self): fl = ScalarFormatterLocator(unit=u.m) values, spacing = fl.locator(34.3, 55.4) minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4) assert_almost_equal(minor_values.value, [36., 37., 38., 39., 41., 42., 43., 44., 46., 47., 48., 49., 51., 52., 53., 54.]) print('minor_values: ' + str(minor_values)) minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4) assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5]) fl.values = [0.1, 1., 14.] * u.m values, spacing = fl.locator(34.3, 36.1) minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4) assert_almost_equal(minor_values.value, []) @pytest.mark.parametrize(('format', 'string'), [('x', '15'), ('x.x', '15.4'), ('x.xx', '15.39'), ('x.xxx', '15.392'), ('%g', '15.3922'), ('%f', '15.392231'), ('%.2f', '15.39'), ('%.3f', '15.392')]) def test_format(self, format, string): fl = ScalarFormatterLocator(number=5, format=format, unit=u.m) assert fl.formatter([15.392231] * u.m, None)[0] == string @pytest.mark.parametrize(('format', 'string'), [('x', '1539'), ('x.x', '1539.2'), ('x.xx', '1539.22'), ('x.xxx', '1539.223')]) def test_format_unit(self, format, string): fl = ScalarFormatterLocator(number=5, format=format, unit=u.m) fl.format_unit = u.cm assert fl.formatter([15.392231] * u.m, None)[0] == string @pytest.mark.parametrize(('format'), ['dd', 'dd:mm', 'xx:mm', 'mx.xxx']) def test_invalid_formats(self, format): fl = ScalarFormatterLocator(number=5, unit=u.m) with pytest.raises(ValueError) as exc: fl.format = format assert exc.value.args[0] == "Invalid format: " + format @pytest.mark.parametrize(('format', 'base_spacing'), [('x', 1. 
* u.m), ('x.x', 0.1 * u.m), ('x.xxx', 0.001 * u.m)]) def test_base_spacing(self, format, base_spacing): fl = ScalarFormatterLocator(number=5, format=format, unit=u.m) assert fl.base_spacing == base_spacing def test_incorrect_spacing(self): fl = ScalarFormatterLocator(unit=u.m) fl.spacing = 0.032 * u.m fl.format = 'x.xx' assert_almost_equal(fl.spacing.to_value(u.m), 0.03)
0982303b5e2a75b83e5b200b6ad2e2721d6db920d309205f122fa0a62efb370b
def get_package_data():
    return {'astropy.visualization.wcsaxes.tests': ['baseline_images/*/*.png',
                                                    'data/*']}
f8f85dd37fd0f07992fa81ddffe477e64d1f4ef72cfd0a0a27f5759e0dc0c831
# Licensed under a 3-clause BSD style license - see LICENSE.rst from ..core import WCSAxes import matplotlib.pyplot as plt from matplotlib.backend_bases import KeyEvent from ....wcs import WCS from ....coordinates import FK5 from ....time import Time from ....tests.image_tests import ignore_matplotlibrc from .test_images import BaseImageTests class TestDisplayWorldCoordinate(BaseImageTests): @ignore_matplotlibrc def test_overlay_coords(self, tmpdir): wcs = WCS(self.msx_header) fig = plt.figure(figsize=(4, 4)) canvas = fig.canvas ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs) fig.add_axes(ax) # On some systems, fig.canvas.draw is not enough to force a draw, so we # save to a temporary file. fig.savefig(tmpdir.join('test1.png').strpath) # Testing default displayed world coordinates string_world = ax._display_world_coords(0.523412, 0.518311) assert string_world == '0\xb029\'45" -0\xb029\'20" (world)' # Test pixel coordinates event1 = KeyEvent('test_pixel_coords', canvas, 'w') fig.canvas.key_press_event(event1.key, guiEvent=event1) string_pixel = ax._display_world_coords(0.523412, 0.523412) assert string_pixel == "0.523412 0.523412 (pixel)" event3 = KeyEvent('test_pixel_coords', canvas, 'w') fig.canvas.key_press_event(event3.key, guiEvent=event3) # Test that it still displays world coords when there are no overlay coords string_world2 = ax._display_world_coords(0.523412, 0.518311) assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)' overlay = ax.get_coords_overlay('fk5') # Regression test for bug that caused format to always be taken from # main world coordinates. overlay[0].set_major_formatter('d.ddd') # On some systems, fig.canvas.draw is not enough to force a draw, so we # save to a temporary file. fig.savefig(tmpdir.join('test2.png').strpath) event4 = KeyEvent('test_pixel_coords', canvas, 'w') fig.canvas.key_press_event(event4.key, guiEvent=event4) # Test that it displays the overlay world coordinates string_world3 = ax._display_world_coords(0.523412, 0.518311) assert string_world3 == '267.176 -28\xb045\'56" (world, overlay 1)' overlay = ax.get_coords_overlay(FK5()) # Regression test for bug that caused format to always be taken from # main world coordinates. overlay[0].set_major_formatter('d.ddd') # On some systems, fig.canvas.draw is not enough to force a draw, so we # save to a temporary file. fig.savefig(tmpdir.join('test3.png').strpath) event5 = KeyEvent('test_pixel_coords', canvas, 'w') fig.canvas.key_press_event(event4.key, guiEvent=event4) # Test that it displays the overlay world coordinates string_world4 = ax._display_world_coords(0.523412, 0.518311) assert string_world4 == '267.176 -28\xb045\'56" (world, overlay 2)' overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030"))) # Regression test for bug that caused format to always be taken from # main world coordinates. overlay[0].set_major_formatter('d.ddd') # On some systems, fig.canvas.draw is not enough to force a draw, so we # save to a temporary file. 
fig.savefig(tmpdir.join('test4.png').strpath) event6 = KeyEvent('test_pixel_coords', canvas, 'w') fig.canvas.key_press_event(event5.key, guiEvent=event6) # Test that it displays the overlay world coordinates string_world5 = ax._display_world_coords(0.523412, 0.518311) assert string_world5 == '267.652 -28\xb046\'23" (world, overlay 3)' @ignore_matplotlibrc def test_cube_coords(self, tmpdir): wcs = WCS(self.cube_header) fig = plt.figure(figsize=(4, 4)) canvas = fig.canvas ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x')) fig.add_axes(ax) # On some systems, fig.canvas.draw is not enough to force a draw, so we # save to a temporary file. fig.savefig(tmpdir.join('test.png').strpath) # Testing default displayed world coordinates string_world = ax._display_world_coords(0.523412, 0.518311) assert string_world == '2563 51\xb043\'01" (world)' # Test pixel coordinates event1 = KeyEvent('test_pixel_coords', canvas, 'w') fig.canvas.key_press_event(event1.key, guiEvent=event1) string_pixel = ax._display_world_coords(0.523412, 0.523412) assert string_pixel == "0.523412 0.523412 (pixel)"
e0f60adcb9986b47e4e3f0ed7245e99e11f2769aaa8303885503b1962cfd3b1a
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from unittest.mock import patch

import pytest
import matplotlib.pyplot as plt

from ..core import WCSAxes
from .... import units as u
from ....tests.image_tests import ignore_matplotlibrc


@ignore_matplotlibrc
def test_getaxislabel():

    fig = plt.figure()
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')

    ax.coords[0].set_axislabel("X")
    ax.coords[1].set_axislabel("Y")
    assert ax.coords[0].get_axislabel() == "X"
    assert ax.coords[1].get_axislabel() == "Y"


@pytest.fixture
def ax():
    fig = plt.figure()
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
    fig.add_axes(ax)
    return ax


def assert_label_draw(ax, x_label, y_label):
    ax.coords[0].set_axislabel("Label 1")
    ax.coords[1].set_axislabel("Label 2")

    with patch.object(ax.coords[0].axislabels, 'set_position') as pos1:
        with patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
            ax.figure.canvas.draw()

    assert pos1.call_count == x_label
    assert pos2.call_count == y_label


@ignore_matplotlibrc
def test_label_visibility_rules_default(ax):
    assert_label_draw(ax, True, True)


@ignore_matplotlibrc
def test_label_visibility_rules_label(ax):
    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999]*u.deg)

    assert_label_draw(ax, False, False)


@ignore_matplotlibrc
def test_label_visibility_rules_ticks(ax):
    ax.coords[0].set_axislabel_visibility_rule('ticks')
    ax.coords[1].set_axislabel_visibility_rule('ticks')

    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999]*u.deg)

    assert_label_draw(ax, True, False)


@ignore_matplotlibrc
def test_label_visibility_rules_always(ax):
    ax.coords[0].set_axislabel_visibility_rule('always')
    ax.coords[1].set_axislabel_visibility_rule('always')

    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999]*u.deg)

    assert_label_draw(ax, True, True)
2e5dcb76d63e0865d1365e416f668cf4ef8f52f721ec8f252a2b86519a50b9a9
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import pytest import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle, Rectangle from matplotlib import rc_context from .... import units as u from ....io import fits from ....wcs import WCS from ....coordinates import SkyCoord from ..patches import SphericalCircle from .. import WCSAxes from . import datasets from ....tests.image_tests import IMAGE_REFERENCE_DIR from ..frame import EllipticalFrame class BaseImageTests: @classmethod def setup_class(cls): cls._data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data')) msx_header = os.path.join(cls._data_dir, 'msx_header') cls.msx_header = fits.Header.fromtextfile(msx_header) rosat_header = os.path.join(cls._data_dir, 'rosat_header') cls.rosat_header = fits.Header.fromtextfile(rosat_header) twoMASS_k_header = os.path.join(cls._data_dir, '2MASS_k_header') cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header) cube_header = os.path.join(cls._data_dir, 'cube_header') cls.cube_header = fits.Header.fromtextfile(cube_header) slice_header = os.path.join(cls._data_dir, 'slice_header') cls.slice_header = fits.Header.fromtextfile(slice_header) class TestBasic(BaseImageTests): @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='image_plot.png', tolerance=0, style={}) def test_image_plot(self): # Test for plotting image and also setting values of ticks fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal') ax.set_xlim(-0.5, 148.5) ax.set_ylim(-0.5, 148.5) ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, tolerance=1.5) @pytest.mark.parametrize('axisbelow', [True, False, 'line']) def test_axisbelow(self, axisbelow): # Test that tick marks, labels, and gridlines are drawn with the # correct zorder controlled by the axisbelow property. fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal') ax.set_axisbelow(axisbelow) ax.set_xlim(-0.5, 148.5) ax.set_ylim(-0.5, 148.5) ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1) ax.grid() # Add an image (default zorder=0). ax.imshow(np.zeros((64, 64))) # Add a patch (default zorder=1). r = Rectangle((30., 50.), 60., 50., facecolor='green', edgecolor='red') ax.add_patch(r) # Add a line (default zorder=2). ax.plot([32, 128], [32, 128], linewidth=10) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='contour_overlay.png', tolerance=0, style={}) def test_contour_overlay(self): # Test for overlaying contours on images hdu_msx = datasets.fetch_msx_hdu() wcs_msx = WCS(self.msx_header) fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.15, 0.15, 0.8, 0.8], projection=WCS(self.twoMASS_k_header), aspect='equal') ax.set_xlim(-0.5, 720.5) ax.set_ylim(-0.5, 720.5) # Overplot contour ax.contour(hdu_msx.data, transform=ax.get_transform(wcs_msx), colors='orange', levels=[2.5e-5, 5e-5, 1.e-4]) ax.coords[0].set_ticks(size=5, width=1) ax.coords[1].set_ticks(size=5, width=1) ax.set_xlim(0., 720.) ax.set_ylim(0., 720.) 
return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='overlay_features_image.png', tolerance=0, style={}) def test_overlay_features_image(self): # Test for overlaying grid, changing format of ticks, setting spacing # and number of ticks fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.25, 0.25, 0.65, 0.65], projection=WCS(self.msx_header), aspect='equal') # Change the format of the ticks ax.coords[0].set_major_formatter('dd:mm:ss') ax.coords[1].set_major_formatter('dd:mm:ss.ssss') # Overlay grid on image ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed') # Set the spacing of ticks on the 'glon' axis to 4 arcsec ax.coords['glon'].set_ticks(spacing=4 * u.arcsec, size=5, width=1) # Set the number of ticks on the 'glat' axis to 9 ax.coords['glat'].set_ticks(number=9, size=5, width=1) # Set labels on axes ax.coords['glon'].set_axislabel('Galactic Longitude', minpad=1.6) ax.coords['glat'].set_axislabel('Galactic Latitude', minpad=-0.75) # Change the frame linewidth and color ax.coords.frame.set_color('red') ax.coords.frame.set_linewidth(2) assert ax.coords.frame.get_color() == 'red' assert ax.coords.frame.get_linewidth() == 2 return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='curvlinear_grid_patches_image.png', tolerance=0, style={}) def test_curvilinear_grid_patches_image(self): # Overlay curvilinear grid and patches on image fig = plt.figure(figsize=(8, 8)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.rosat_header), aspect='equal') ax.set_xlim(-0.5, 479.5) ax.set_ylim(-0.5, 239.5) ax.grid(color='black', alpha=1.0, lw=1, linestyle='dashed') p = Circle((300, 100), radius=40, ec='yellow', fc='none') ax.add_patch(p) p = Circle((30., 20.), radius=20., ec='orange', fc='none', transform=ax.get_transform('world')) ax.add_patch(p) p = Circle((60., 50.), radius=20., ec='red', fc='none', transform=ax.get_transform('fk5')) ax.add_patch(p) p = Circle((40., 60.), radius=20., ec='green', fc='none', transform=ax.get_transform('galactic')) ax.add_patch(p) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='cube_slice_image.png', tolerance=0, style={}) def test_cube_slice_image(self): # Test for cube slicing fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.cube_header), slices=(50, 'y', 'x'), aspect='equal') ax.set_xlim(-0.5, 52.5) ax.set_ylim(-0.5, 106.5) ax.coords[2].set_axislabel('Velocity m/s') ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1, exclude_overlapping=True) ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1, exclude_overlapping=True) ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid') ax.coords[2].grid(grid_type='contours', color='red', linestyle='solid') return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='cube_slice_image_lonlat.png', tolerance=0, style={}) def test_cube_slice_image_lonlat(self): # Test for cube slicing. Here we test with longitude and latitude since # there is some longitude-specific code in _update_grid_contour. 
fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.cube_header), slices=('x', 'y', 50), aspect='equal') ax.set_xlim(-0.5, 106.5) ax.set_ylim(-0.5, 106.5) ax.coords[0].grid(grid_type='contours', color='blue', linestyle='solid') ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid') return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, tolerance=0, style={}) def test_plot_coord(self): fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.15, 0.15, 0.8, 0.8], projection=WCS(self.twoMASS_k_header), aspect='equal') ax.set_xlim(-0.5, 720.5) ax.set_ylim(-0.5, 720.5) c = SkyCoord(266 * u.deg, -29 * u.deg) ax.plot_coord(c, 'o') return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, tolerance=0, style={}) def test_plot_line(self): fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.15, 0.15, 0.8, 0.8], projection=WCS(self.twoMASS_k_header), aspect='equal') ax.set_xlim(-0.5, 720.5) ax.set_ylim(-0.5, 720.5) c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg) ax.plot_coord(c) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='changed_axis_units.png', tolerance=0, style={}) def test_changed_axis_units(self): # Test to see if changing the units of axis works fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.cube_header), slices=(50, 'y', 'x'), aspect='equal') ax.set_xlim(-0.5, 52.5) ax.set_ylim(-0.5, 106.5) ax.coords[2].set_major_formatter('x.xx') ax.coords[2].set_format_unit(u.km / u.s) ax.coords[2].set_axislabel('Velocity km/s') ax.coords[1].set_ticks(width=1, exclude_overlapping=True) ax.coords[2].set_ticks(width=1, exclude_overlapping=True) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='minor_ticks_image.png', tolerance=0, style={}) def test_minor_ticks(self): # Test for drawing minor ticks fig = plt.figure() ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.cube_header), slices=(50, 'y', 'x'), aspect='equal') ax.set_xlim(-0.5, 52.5) ax.set_ylim(-0.5, 106.5) ax.coords[2].set_ticks(exclude_overlapping=True) ax.coords[1].set_ticks(exclude_overlapping=True) ax.coords[2].display_minor_ticks(True) ax.coords[1].display_minor_ticks(True) ax.coords[2].set_minor_frequency(3) ax.coords[1].set_minor_frequency(10) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='ticks_labels.png', tolerance=0, style={}) def test_ticks_labels(self): fig = plt.figure(figsize=(6, 6)) ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None) fig.add_axes(ax) ax.set_xlim(-0.5, 2) ax.set_ylim(-0.5, 2) ax.coords[0].set_ticks(size=10, color='blue', alpha=0.2, width=1) ax.coords[1].set_ticks(size=20, color='red', alpha=0.9, width=1) ax.coords[0].set_ticks_position('all') ax.coords[1].set_ticks_position('all') ax.coords[0].set_axislabel('X-axis', size=20) ax.coords[1].set_axislabel('Y-axis', color='green', size=25, weight='regular', style='normal', family='cmtt10') ax.coords[0].set_axislabel_position('t') ax.coords[1].set_axislabel_position('r') ax.coords[0].set_ticklabel(color='purple', size=15, alpha=1, weight='light', style='normal', family='cmss10') ax.coords[1].set_ticklabel(color='black', size=18, alpha=0.9, weight='bold', family='cmr10') ax.coords[0].set_ticklabel_position('all') 
ax.coords[1].set_ticklabel_position('r') return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='rcparams.png', tolerance=0, style={}) def test_rcparams(self): # Test default style (matplotlib.rcParams) for ticks and gridlines with rc_context({ 'xtick.color': 'red', 'xtick.major.size': 20, 'xtick.major.width': 2, 'grid.color': 'blue', 'grid.linestyle': ':', 'grid.linewidth': 1, 'grid.alpha': 0.5}): fig = plt.figure(figsize=(6, 6)) ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None) fig.add_axes(ax) ax.set_xlim(-0.5, 2) ax.set_ylim(-0.5, 2) ax.grid() ax.coords[0].set_ticks(exclude_overlapping=True) ax.coords[1].set_ticks(exclude_overlapping=True) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='tick_angles.png', tolerance=0, style={}) def test_tick_angles(self): # Test that tick marks point in the correct direction, even when the # axes limits extend only over a few FITS pixels. Addresses #45, #46. w = WCS() w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] w.wcs.crval = [90, 70] w.wcs.cdelt = [16, 16] w.wcs.crpix = [1, 1] w.wcs.radesys = 'ICRS' w.wcs.equinox = 2000.0 fig = plt.figure(figsize=(3, 3)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w) ax.set_xlim(1, -1) ax.set_ylim(-1, 1) ax.grid(color='gray', alpha=0.5, linestyle='solid') ax.coords['ra'].set_ticks(color='red', size=20) ax.coords['dec'].set_ticks(color='red', size=20) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='tick_angles_non_square_axes.png', tolerance=0, style={}) def test_tick_angles_non_square_axes(self): # Test that tick marks point in the correct direction, even when the # axes limits extend only over a few FITS pixels, and the axes are # non-square. w = WCS() w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] w.wcs.crval = [90, 70] w.wcs.cdelt = [16, 16] w.wcs.crpix = [1, 1] w.wcs.radesys = 'ICRS' w.wcs.equinox = 2000.0 fig = plt.figure(figsize=(6, 3)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w) ax.set_xlim(1, -1) ax.set_ylim(-1, 1) ax.grid(color='gray', alpha=0.5, linestyle='solid') ax.coords['ra'].set_ticks(color='red', size=20) ax.coords['dec'].set_ticks(color='red', size=20) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='set_coord_type.png', tolerance=0, style={}) def test_set_coord_type(self): # Test for setting coord_type fig = plt.figure(figsize=(3, 3)) ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=WCS(self.msx_header), aspect='equal') ax.set_xlim(-0.5, 148.5) ax.set_ylim(-0.5, 148.5) ax.coords[0].set_coord_type('scalar') ax.coords[1].set_coord_type('scalar') ax.coords[0].set_major_formatter('x.xxx') ax.coords[1].set_major_formatter('x.xxx') ax.coords[0].set_ticks(exclude_overlapping=True) ax.coords[1].set_ticks(exclude_overlapping=True) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='test_ticks_regression_1.png', tolerance=0, style={}) def test_ticks_regression(self): # Regression test for a bug that caused ticks aligned exactly with a # sampled frame point to not appear. This also checks that tick labels # don't get added more than once, and that no error occurs when e.g. # the top part of the frame is all at the same coordinate as one of the # potential ticks (which causes the tick angle calculation to return # NaN). 
wcs = WCS(self.slice_header) fig = plt.figure(figsize=(3, 3)) ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto') limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1] ax.set_ylim(*limits) ax.coords[0].set_ticks(spacing=0.002 * u.deg) ax.coords[1].set_ticks(spacing=5 * u.km / u.s) ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels ax.coords[1].set_ticklabel(alpha=0.5) ax.coords[0].set_ticklabel_position('all') ax.coords[1].set_ticklabel_position('all') return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='test_axislabels_regression.png', savefig_kwargs={'bbox_inches': 'tight'}, tolerance=0, style={}) def test_axislabels_regression(self): # Regression test for a bug that meant that if tick labels were made # invisible with ``set_visible(False)``, they were still added to the # list of bounding boxes for tick labels, but with default values of 0 # to 1, which caused issues. wcs = WCS(self.msx_header) fig = plt.figure(figsize=(3, 3)) ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto') ax.coords[0].set_axislabel("Label 1") ax.coords[1].set_axislabel("Label 2") ax.coords[1].set_axislabel_visibility_rule('always') ax.coords[1].ticklabels.set_visible(False) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, savefig_kwargs={'bbox_inches': 'tight'}, tolerance=0, style={}) def test_noncelestial_angular(self, tmpdir): # Regression test for a bug that meant that when passing a WCS that had # angular axes and using set_coord_type to set the coordinates to # longitude/latitude, but where the WCS wasn't recognized as celestial, # the WCS units are not converted to deg, so we can't assume that # transform will always return degrees. 
wcs = WCS(naxis=2) wcs.wcs.ctype = ['solar-x', 'solar-y'] wcs.wcs.cunit = ['arcsec', 'arcsec'] fig = plt.figure(figsize=(3, 3)) ax = fig.add_subplot(1, 1, 1, projection=wcs) ax.imshow(np.zeros([1024, 1024]), origin='lower') ax.coords[0].set_coord_type('longitude', coord_wrap=180) ax.coords[1].set_coord_type('latitude') ax.coords[0].set_major_formatter('s.s') ax.coords[1].set_major_formatter('s.s') ax.grid(color='white', ls='solid') # Force drawing (needed for format_coord) fig.savefig(tmpdir.join('nothing').strpath) # TODO: the formatted string should show units assert ax.format_coord(512, 512) == "513.0 513.0 (world)" return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, savefig_kwargs={'bbox_inches': 'tight'}, tolerance=0, style={}) def test_patches_distortion(self, tmpdir): # Check how patches get distorted (and make sure that scatter markers # and SphericalCircle don't) wcs = WCS(self.msx_header) fig = plt.figure(figsize=(3, 3)) ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='equal') # Pixel coordinates r = Rectangle((30., 50.), 60., 50., edgecolor='green', facecolor='none') ax.add_patch(r) # FK5 coordinates r = Rectangle((266.4, -28.9), 0.3, 0.3, edgecolor='cyan', facecolor='none', transform=ax.get_transform('fk5')) ax.add_patch(r) # FK5 coordinates c = Circle((266.4, -29.1), 0.15, edgecolor='magenta', facecolor='none', transform=ax.get_transform('fk5')) ax.add_patch(c) # Pixel coordinates ax.scatter([40, 100, 130], [30, 130, 60], s=100, edgecolor='red', facecolor=(1, 0, 0, 0.5)) # World coordinates (should not be distorted) ax.scatter(266.78238, -28.769255, transform=ax.get_transform('fk5'), s=300, edgecolor='red', facecolor='none') # World coordinates (should not be distorted) r = SphericalCircle((266.4 * u.deg, -29.1 * u.deg), 0.15 * u.degree, edgecolor='purple', facecolor='none', transform=ax.get_transform('fk5')) ax.add_patch(r) ax.coords[0].set_ticklabel_visible(False) ax.coords[1].set_ticklabel_visible(False) return fig @pytest.mark.remote_data(source='astropy') @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, tolerance=0, style={}) def test_elliptical_frame(self): # Regression test for a bug (astropy/astropy#6063) that caused labels to # be incorrectly simplified. wcs = WCS(self.msx_header) fig = plt.figure(figsize=(5, 3)) ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame) return fig
da51c9cc0ac5e42081c1cbf081c4640403f40966859eb12ac52520841bfb07a5
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np

from matplotlib.transforms import Affine2D, IdentityTransform

from ....wcs import WCS
from ..transforms import WCSWorld2PixelTransform

WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ['x', 'y']
WCS2D.wcs.cunit = ['km', 'km']
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0., 0.]

WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ['x', 'y', 'z']
WCS3D.wcs.cunit = ['km', 'km', 'km']
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0., 0., 1.]


def test_shorthand_inversion():
    """Test that the Matplotlib subtraction shorthand for composing and
    inverting transformations works."""
    w1 = WCS(naxis=2)
    w1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    w1.wcs.crpix = [256.0, 256.0]
    w1.wcs.cdelt = [-0.05, 0.05]
    w1.wcs.crval = [120.0, -19.0]

    w2 = WCS(naxis=2)
    w2.wcs.ctype = ['RA---SIN', 'DEC--SIN']
    w2.wcs.crpix = [256.0, 256.0]
    w2.wcs.cdelt = [-0.05, 0.05]
    w2.wcs.crval = [235.0, +23.7]

    t1 = WCSWorld2PixelTransform(w1)
    t2 = WCSWorld2PixelTransform(w2)

    assert t1 - t2 == t1 + t2.inverted()
    assert t1 - t2 != t2.inverted() + t1
    assert t1 - t1 == IdentityTransform()


# We add Affine2D to catch the fact that in Matplotlib, having a Composite
# transform can end up in more strict requirements for the dimensionality.


def test_2d():

    world = np.ones((10, 2))

    w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
    pixel = w1.transform(world)
    world_2 = w1.inverted().transform(pixel)

    np.testing.assert_allclose(world, world_2)


def test_3d():

    world = np.ones((10, 3))

    w1 = WCSWorld2PixelTransform(WCS3D, slice=('y', 0, 'x')) + Affine2D()
    pixel = w1.transform(world)
    world_2 = w1.inverted().transform(pixel)

    np.testing.assert_allclose(world[:, 0], world_2[:, 0])
    np.testing.assert_allclose(world[:, 2], world_2[:, 2])
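# An illustrative sketch, not collected by pytest, of what the composition
# shorthand exercised above gives you in practice: chain the world-to-pixel
# transform with an ordinary Affine2D and round-trip through the inverse of
# the composite.  The 2x binning factor and the sample world coordinates are
# hypothetical values chosen only for this sketch.
def _composition_sketch():
    world = np.array([[1000., 2000.], [-500., 250.]])

    # world -> pixel for WCS2D, then shrink the pixel coordinates by 2
    binned = WCSWorld2PixelTransform(WCS2D) + Affine2D().scale(0.5)

    pixel_binned = binned.transform(world)
    world_back = binned.inverted().transform(pixel_binned)

    # The linear WCS2D round-trips essentially exactly.
    np.testing.assert_allclose(world, world_back)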
dc401b34d7acca692cf8079ecebd0cba0897939c33ab53561bc160c60c6a7a2e
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the Slicing mixin to the NDData class. from ... import log __all__ = ['NDSlicingMixin'] class NDSlicingMixin: """Mixin to provide slicing on objects using the `NDData` interface. The ``data``, ``mask``, ``uncertainty`` and ``wcs`` will be sliced, if set and sliceable. The ``unit`` and ``meta`` will be untouched. The return will be a reference and not a copy, if possible. Examples -------- Using this Mixin with `~astropy.nddata.NDData`: >>> from astropy.nddata import NDData, NDSlicingMixin >>> class NDDataSliceable(NDSlicingMixin, NDData): ... pass Slicing an instance containing data:: >>> nd = NDDataSliceable([1,2,3,4,5]) >>> nd[1:3] NDDataSliceable([2, 3]) Also the other attributes are sliced for example the ``mask``:: >>> import numpy as np >>> mask = np.array([True, False, True, True, False]) >>> nd2 = NDDataSliceable(nd, mask=mask) >>> nd2slc = nd2[1:3] >>> nd2slc[nd2slc.mask] NDDataSliceable([3]) Be aware that changing values of the sliced instance will change the values of the original:: >>> nd3 = nd2[1:3] >>> nd3.data[0] = 100 >>> nd2 NDDataSliceable([ 1, 100, 3, 4, 5]) See also -------- NDDataRef NDDataArray """ def __getitem__(self, item): # Abort slicing if the data is a single scalar. if self.data.shape == (): raise TypeError('scalars cannot be sliced.') # Let the other methods handle slicing. kwargs = self._slice(item) return self.__class__(**kwargs) def _slice(self, item): """Collects the sliced attributes and passes them back as `dict`. It passes uncertainty, mask and wcs to their appropriate ``_slice_*`` method, while ``meta`` and ``unit`` are simply taken from the original. The data is assumed to be sliceable and is sliced directly. When possible the return should *not* be a copy of the data but a reference. Parameters ---------- item : slice The slice passed to ``__getitem__``. Returns ------- dict : Containing all the attributes after slicing - ready to use them to create ``self.__class__.__init__(**kwargs)`` in ``__getitem__``. """ kwargs = {} kwargs['data'] = self.data[item] # Try to slice some attributes kwargs['uncertainty'] = self._slice_uncertainty(item) kwargs['mask'] = self._slice_mask(item) kwargs['wcs'] = self._slice_wcs(item) # Attributes which are copied and not intended to be sliced kwargs['unit'] = self.unit kwargs['meta'] = self.meta return kwargs def _slice_uncertainty(self, item): if self.uncertainty is None: return None try: return self.uncertainty[item] except TypeError: # Catching TypeError in case the object has no __getitem__ method. # But let IndexError raise. log.info("uncertainty cannot be sliced.") return self.uncertainty def _slice_mask(self, item): if self.mask is None: return None try: return self.mask[item] except TypeError: log.info("mask cannot be sliced.") return self.mask def _slice_wcs(self, item): if self.wcs is None: return None try: return self.wcs[item] except TypeError: log.info("wcs cannot be sliced.") return self.wcs
8673fa3df20b8fac1a79c32184e182cfda0dc80c26619f6e14b1c886bb0025b6
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the Arithmetic mixin to the NDData class. from copy import deepcopy import numpy as np from ..nduncertainty import NDUncertainty from ...units import dimensionless_unscaled from ...utils import format_doc, sharedmethod __all__ = ['NDArithmeticMixin'] # Global so it doesn't pollute the class dict unnecessarily: # Docstring templates for add, subtract, multiply, divide methods. _arit_doc = """ Performs {name} by evaluating ``self`` {op} ``operand``. Parameters ---------- operand, operand2 : `NDData`-like instance or convertible to one. If ``operand2`` is ``None`` or not given it will perform the operation ``self`` {op} ``operand``. If ``operand2`` is given it will perform ``operand`` {op} ``operand2``. If the method was called on a class rather than on the instance ``operand2`` must be given. propagate_uncertainties : `bool` or ``None``, optional If ``None`` the result will have no uncertainty. If ``False`` the result will have a copied version of the first operand that has an uncertainty. If ``True`` the result will have a correctly propagated uncertainty from the uncertainties of the operands but this assumes that the uncertainties are `NDUncertainty`-like. Default is ``True``. .. versionchanged:: 1.2 This parameter must be given as keyword-parameter. Using it as positional parameter is deprecated. ``None`` was added as valid parameter value. handle_mask : callable, ``'first_found'`` or ``None``, optional If ``None`` the result will have no mask. If ``'first_found'`` the result will have a copied version of the first operand that has a mask). If it is a callable then the specified callable must create the results ``mask`` and if necessary provide a copy. Default is `numpy.logical_or`. .. versionadded:: 1.2 handle_meta : callable, ``'first_found'`` or ``None``, optional If ``None`` the result will have no meta. If ``'first_found'`` the result will have a copied version of the first operand that has a (not empty) meta. If it is a callable then the specified callable must create the results ``meta`` and if necessary provide a copy. Default is ``None``. .. versionadded:: 1.2 compare_wcs : callable, ``'first_found'`` or ``None``, optional If ``None`` the result will have no wcs and no comparison between the wcs of the operands is made. If ``'first_found'`` the result will have a copied version of the first operand that has a wcs. If it is a callable then the specified callable must compare the ``wcs``. The resulting ``wcs`` will be like if ``False`` was given otherwise it raises a ``ValueError`` if the comparison was not successful. Default is ``'first_found'``. .. versionadded:: 1.2 uncertainty_correlation : number or `~numpy.ndarray`, optional The correlation between the two operands is used for correct error propagation for correlated data as given in: https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas Default is 0. .. versionadded:: 1.2 kwargs : Any other parameter that should be passed to the callables used. Returns ------- result : `~astropy.nddata.NDData`-like The resulting dataset Notes ----- If a ``callable`` is used for ``mask``, ``wcs`` or ``meta`` the callable must accept the corresponding attributes as first two parameters. If the callable also needs additional parameters these can be defined as ``kwargs`` and must start with ``"wcs_"`` (for wcs callable) or ``"meta_"`` (for meta callable). This startstring is removed before the callable is called. 
``"first_found"`` can also be abbreviated with ``"ff"``. """ class NDArithmeticMixin: """ Mixin class to add arithmetic to an NDData object. When subclassing, be sure to list the superclasses in the correct order so that the subclass sees NDData as the main superclass. See `~astropy.nddata.NDDataArray` for an example. Notes ----- This class only aims at covering the most common cases so there are certain restrictions on the saved attributes:: - ``uncertainty`` : has to be something that has a `NDUncertainty`-like interface for uncertainty propagation - ``mask`` : has to be something that can be used by a bitwise ``or`` operation. - ``wcs`` : has to implement a way of comparing with ``=`` to allow the operation. But there is a workaround that allows to disable handling a specific attribute and to simply set the results attribute to ``None`` or to copy the existing attribute (and neglecting the other). For example for uncertainties not representing an `NDUncertainty`-like interface you can alter the ``propagate_uncertainties`` parameter in :meth:`NDArithmeticMixin.add`. ``None`` means that the result will have no uncertainty, ``False`` means it takes the uncertainty of the first operand (if this does not exist from the second operand) as the result's uncertainty. This behaviour is also explained in the docstring for the different arithmetic operations. Decomposing the units is not attempted, mainly due to the internal mechanics of `~astropy.units.Quantity`, so the resulting data might have units like ``km/m`` if you divided for example 100km by 5m. So this Mixin has adopted this behaviour. Examples -------- Using this Mixin with `~astropy.nddata.NDData`: >>> from astropy.nddata import NDData, NDArithmeticMixin >>> class NDDataWithMath(NDArithmeticMixin, NDData): ... pass Using it with one operand on an instance:: >>> ndd = NDDataWithMath(100) >>> ndd.add(20) NDDataWithMath(120) Using it with two operand on an instance:: >>> ndd = NDDataWithMath(-4) >>> ndd.divide(1, ndd) NDDataWithMath(-0.25) Using it as classmethod requires two operands:: >>> NDDataWithMath.subtract(5, 4) NDDataWithMath(1) """ def _arithmetic(self, operation, operand, propagate_uncertainties=True, handle_mask=np.logical_or, handle_meta=None, uncertainty_correlation=0, compare_wcs='first_found', **kwds): """ Base method which calculates the result of the arithmetic operation. This method determines the result of the arithmetic operation on the ``data`` including their units and then forwards to other methods to calculate the other properties for the result (like uncertainty). Parameters ---------- operation : callable The operation that is performed on the `NDData`. Supported are `numpy.add`, `numpy.subtract`, `numpy.multiply` and `numpy.true_divide`. operand : same type (class) as self see :meth:`NDArithmeticMixin.add` propagate_uncertainties : `bool` or ``None``, optional see :meth:`NDArithmeticMixin.add` handle_mask : callable, ``'first_found'`` or ``None``, optional see :meth:`NDArithmeticMixin.add` handle_meta : callable, ``'first_found'`` or ``None``, optional see :meth:`NDArithmeticMixin.add` compare_wcs : callable, ``'first_found'`` or ``None``, optional see :meth:`NDArithmeticMixin.add` uncertainty_correlation : ``Number`` or `~numpy.ndarray`, optional see :meth:`NDArithmeticMixin.add` kwargs : Any other parameter that should be passed to the different :meth:`NDArithmeticMixin._arithmetic_mask` (or wcs, ...) methods. 
Returns ------- result : `~numpy.ndarray` or `~astropy.units.Quantity` The resulting data as array (in case both operands were without unit) or as quantity if at least one had a unit. kwargs : `dict` The kwargs should contain all the other attributes (besides data and unit) needed to create a new instance for the result. Creating the new instance is up to the calling method, for example :meth:`NDArithmeticMixin.add`. """ # Find the appropriate keywords for the appropriate method (not sure # if data and uncertainty are ever used ...) kwds2 = {'mask': {}, 'meta': {}, 'wcs': {}, 'data': {}, 'uncertainty': {}} for i in kwds: splitted = i.split('_', 1) try: kwds2[splitted[0]][splitted[1]] = kwds[i] except KeyError: raise KeyError('Unknown prefix {0} for parameter {1}' ''.format(splitted[0], i)) kwargs = {} # First check that the WCS allows the arithmetic operation if compare_wcs is None: kwargs['wcs'] = None elif compare_wcs in ['ff', 'first_found']: if self.wcs is None: kwargs['wcs'] = deepcopy(operand.wcs) else: kwargs['wcs'] = deepcopy(self.wcs) else: kwargs['wcs'] = self._arithmetic_wcs(operation, operand, compare_wcs, **kwds2['wcs']) # Then calculate the resulting data (which can but not needs to be a # quantity) result = self._arithmetic_data(operation, operand, **kwds2['data']) # Determine the other properties if propagate_uncertainties is None: kwargs['uncertainty'] = None elif not propagate_uncertainties: if self.uncertainty is None: kwargs['uncertainty'] = deepcopy(operand.uncertainty) else: kwargs['uncertainty'] = deepcopy(self.uncertainty) else: kwargs['uncertainty'] = self._arithmetic_uncertainty( operation, operand, result, uncertainty_correlation, **kwds2['uncertainty']) if handle_mask is None: kwargs['mask'] = None elif handle_mask in ['ff', 'first_found']: if self.mask is None: kwargs['mask'] = deepcopy(operand.mask) else: kwargs['mask'] = deepcopy(self.mask) else: kwargs['mask'] = self._arithmetic_mask(operation, operand, handle_mask, **kwds2['mask']) if handle_meta is None: kwargs['meta'] = None elif handle_meta in ['ff', 'first_found']: if not self.meta: kwargs['meta'] = deepcopy(operand.meta) else: kwargs['meta'] = deepcopy(self.meta) else: kwargs['meta'] = self._arithmetic_meta( operation, operand, handle_meta, **kwds2['meta']) # Wrap the individual results into a new instance of the same class. return result, kwargs def _arithmetic_data(self, operation, operand, **kwds): """ Calculate the resulting data Parameters ---------- operation : callable see `NDArithmeticMixin._arithmetic` parameter description. operand : `NDData`-like instance The second operand wrapped in an instance of the same class as self. kwds : Additional parameters. Returns ------- result_data : `~numpy.ndarray` or `~astropy.units.Quantity` If both operands had no unit the resulting data is a simple numpy array, but if any of the operands had a unit the return is a Quantity. """ # Do the calculation with or without units if self.unit is None and operand.unit is None: result = operation(self.data, operand.data) elif self.unit is None: result = operation(self.data * dimensionless_unscaled, operand.data * operand.unit) elif operand.unit is None: result = operation(self.data * self.unit, operand.data * dimensionless_unscaled) else: result = operation(self.data * self.unit, operand.data * operand.unit) return result def _arithmetic_uncertainty(self, operation, operand, result, correlation, **kwds): """ Calculate the resulting uncertainty. 
Parameters ---------- operation : callable see :meth:`NDArithmeticMixin._arithmetic` parameter description. operand : `NDData`-like instance The second operand wrapped in an instance of the same class as self. result : `~astropy.units.Quantity` or `~numpy.ndarray` The result of :meth:`NDArithmeticMixin._arithmetic_data`. correlation : number or `~numpy.ndarray` see :meth:`NDArithmeticMixin.add` parameter description. kwds : Additional parameters. Returns ------- result_uncertainty : `NDUncertainty` subclass instance or None The resulting uncertainty already saved in the same `NDUncertainty` subclass that ``self`` had (or ``operand`` if self had no uncertainty). ``None`` only if both had no uncertainty. """ # Make sure these uncertainties are NDUncertainties so this kind of # propagation is possible. if (self.uncertainty is not None and not isinstance(self.uncertainty, NDUncertainty)): raise TypeError("Uncertainty propagation is only defined for " "subclasses of NDUncertainty.") if (operand.uncertainty is not None and not isinstance(operand.uncertainty, NDUncertainty)): raise TypeError("Uncertainty propagation is only defined for " "subclasses of NDUncertainty.") # Now do the uncertainty propagation # TODO: There is no enforced requirement that actually forbids the # uncertainty to have negative entries but with correlation the # sign of the uncertainty DOES matter. if self.uncertainty is None and operand.uncertainty is None: # Neither has uncertainties so the result should have none. return None elif self.uncertainty is None: # Create a temporary uncertainty to allow uncertainty propagation # to yield the correct results. (issue #4152) self.uncertainty = operand.uncertainty.__class__(None) result_uncert = self.uncertainty.propagate(operation, operand, result, correlation) # Delete the temporary uncertainty again. self.uncertainty = None return result_uncert elif operand.uncertainty is None: # As with self.uncertainty is None but the other way around. operand.uncertainty = self.uncertainty.__class__(None) result_uncert = self.uncertainty.propagate(operation, operand, result, correlation) operand.uncertainty = None return result_uncert else: # Both have uncertainties so just propagate. return self.uncertainty.propagate(operation, operand, result, correlation) def _arithmetic_mask(self, operation, operand, handle_mask, **kwds): """ Calculate the resulting mask This is implemented as the piecewise ``or`` operation if both have a mask. Parameters ---------- operation : callable see :meth:`NDArithmeticMixin._arithmetic` parameter description. By default, the ``operation`` will be ignored. operand : `NDData`-like instance The second operand wrapped in an instance of the same class as self. handle_mask : callable see :meth:`NDArithmeticMixin.add` kwds : Additional parameters given to ``handle_mask``. Returns ------- result_mask : any type If only one mask was present this mask is returned. If neither had a mask ``None`` is returned. Otherwise ``handle_mask`` must create (and copy) the returned mask. """ # If only one mask is present we need not bother about any type checks if self.mask is None and operand.mask is None: return None elif self.mask is None: # Make a copy so there is no reference in the result. 
return deepcopy(operand.mask) elif operand.mask is None: return deepcopy(self.mask) else: # Now lets calculate the resulting mask (operation enforces copy) return handle_mask(self.mask, operand.mask, **kwds) def _arithmetic_wcs(self, operation, operand, compare_wcs, **kwds): """ Calculate the resulting wcs. There is actually no calculation involved but it is a good place to compare wcs information of both operands. This is currently not working properly with `~astropy.wcs.WCS` (which is the suggested class for storing as wcs property) but it will not break it neither. Parameters ---------- operation : callable see :meth:`NDArithmeticMixin._arithmetic` parameter description. By default, the ``operation`` will be ignored. operand : `NDData` instance or subclass The second operand wrapped in an instance of the same class as self. compare_wcs : callable see :meth:`NDArithmeticMixin.add` parameter description. kwds : Additional parameters given to ``compare_wcs``. Raises ------ ValueError If ``compare_wcs`` returns ``False``. Returns ------- result_wcs : any type The ``wcs`` of the first operand is returned. """ # ok, not really arithmetics but we need to check which wcs makes sense # for the result and this is an ideal place to compare the two WCS, # too. # I'll assume that the comparison returned None or False in case they # are not equal. if not compare_wcs(self.wcs, operand.wcs, **kwds): raise ValueError("WCS are not equal.") return self.wcs def _arithmetic_meta(self, operation, operand, handle_meta, **kwds): """ Calculate the resulting meta. Parameters ---------- operation : callable see :meth:`NDArithmeticMixin._arithmetic` parameter description. By default, the ``operation`` will be ignored. operand : `NDData`-like instance The second operand wrapped in an instance of the same class as self. handle_meta : callable see :meth:`NDArithmeticMixin.add` kwds : Additional parameters given to ``handle_meta``. Returns ------- result_meta : any type The result of ``handle_meta``. """ # Just return what handle_meta does with both of the metas. return handle_meta(self.meta, operand.meta, **kwds) @sharedmethod @format_doc(_arit_doc, name='addition', op='+') def add(self, operand, operand2=None, **kwargs): return self._prepare_then_do_arithmetic(np.add, operand, operand2, **kwargs) @sharedmethod @format_doc(_arit_doc, name='subtraction', op='-') def subtract(self, operand, operand2=None, **kwargs): return self._prepare_then_do_arithmetic(np.subtract, operand, operand2, **kwargs) @sharedmethod @format_doc(_arit_doc, name="multiplication", op="*") def multiply(self, operand, operand2=None, **kwargs): return self._prepare_then_do_arithmetic(np.multiply, operand, operand2, **kwargs) @sharedmethod @format_doc(_arit_doc, name="division", op="/") def divide(self, operand, operand2=None, **kwargs): return self._prepare_then_do_arithmetic(np.true_divide, operand, operand2, **kwargs) @sharedmethod def _prepare_then_do_arithmetic(self_or_cls, operation, operand, operand2, **kwargs): """Intermediate method called by public arithmetics (i.e. ``add``) before the processing method (``_arithmetic``) is invoked. .. warning:: Do not override this method in subclasses. This method checks if it was called as instance or as class method and then wraps the operands and the result from ``_arithmetics`` in the appropriate subclass. 
Parameters ---------- self_or_cls : instance or class ``sharedmethod`` behaves like a normal method if called on the instance (then this parameter is ``self``) but like a classmethod when called on the class (then this parameter is ``cls``). operations : callable The operation (normally a numpy-ufunc) that represents the appropriate action. operand, operand2, kwargs : See for example ``add``. Result ------ result : `~astropy.nddata.NDData`-like Depending how this method was called either ``self_or_cls`` (called on class) or ``self_or_cls.__class__`` (called on instance) is the NDData-subclass that is used as wrapper for the result. """ # DO NOT OVERRIDE THIS METHOD IN SUBCLASSES. if isinstance(self_or_cls, NDArithmeticMixin): # True means it was called on the instance, so self_or_cls is # a reference to self cls = self_or_cls.__class__ if operand2 is None: # Only one operand was given. Set operand2 to operand and # operand to self so that we call the appropriate method of the # operand. operand2 = operand operand = self_or_cls else: # Convert the first operand to the class of this method. # This is important so that always the correct _arithmetics is # called later that method. operand = cls(operand) else: # It was used as classmethod so self_or_cls represents the cls cls = self_or_cls # It was called on the class so we expect two operands! if operand2 is None: raise TypeError("operand2 must be given when the method isn't " "called on an instance.") # Convert to this class. See above comment why. operand = cls(operand) # At this point operand, operand2, kwargs and cls are determined. # Let's try to convert operand2 to the class of operand to allows for # arithmetic operations with numbers, lists, numpy arrays, numpy masked # arrays, astropy quantities, masked quantities and of other subclasses # of NDData. operand2 = cls(operand2) # Now call the _arithmetics method to do the arithmetics. result, init_kwds = operand._arithmetic(operation, operand2, **kwargs) # Return a new class based on the result return cls(result, **init_kwds)
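# Usage sketch: a minimal illustration of the arithmetic mixin defined above,
# assuming astropy.nddata provides NDData, NDArithmeticMixin and
# StdDevUncertainty.  The subclass name ``NDDataWithMath`` follows the class
# docstring; the data, uncertainties and masks are arbitrary example values,
# and only the documented keywords (``propagate_uncertainties``,
# ``handle_mask``) are exercised.

import numpy as np
from astropy.nddata import NDData, NDArithmeticMixin, StdDevUncertainty


class NDDataWithMath(NDArithmeticMixin, NDData):
    """NDData with add/subtract/multiply/divide, as in the class docstring."""


if __name__ == '__main__':
    a = NDDataWithMath([1.0, 2.0, 3.0],
                       uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]),
                       mask=np.array([False, True, False]))
    b = NDDataWithMath([1.0, 1.0, 1.0],
                       uncertainty=StdDevUncertainty([0.2, 0.2, 0.2]),
                       mask=np.array([True, False, False]))

    # Default behaviour: uncertainties are propagated and the two masks are
    # combined with numpy.logical_or, as described in the docstring template.
    summed = a.add(b)
    print(summed.data, summed.uncertainty.array, summed.mask)

    # Discard the uncertainty and the mask of the result entirely.
    plain = a.add(b, propagate_uncertainties=None, handle_mask=None)
    print(plain.uncertainty, plain.mask)

    # Called on the class rather than an instance, two operands are required.
    diff = NDDataWithMath.subtract(5, 4)
    print(diff.data)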
5f4793263c238f277b5fc0c1d6bbf25f0532011251042bd4b14c25a95403ee68
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the I/O mixin to the NDData class.

from ...io import registry as io_registry

__all__ = ['NDIOMixin']


class NDIOMixin:
    """
    Mixin class to connect NDData to the astropy input/output registry.

    This mixin adds two methods to its subclasses, ``read`` and ``write``.
    """

    @classmethod
    def read(cls, *args, **kwargs):
        """
        Read and parse gridded N-dimensional data and return as an
        NDData-derived object.

        This function provides the NDDataBase interface to the astropy
        unified I/O layer.  This allows easily reading a file in the
        supported data formats.
        """
        return io_registry.read(cls, *args, **kwargs)

    def write(self, *args, **kwargs):
        """
        Write a gridded N-dimensional data object out in specified format.

        This function provides the NDDataBase interface to the astropy
        unified I/O layer.  This allows easily writing a file in the
        supported data formats.
        """
        io_registry.write(self, *args, **kwargs)
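# Usage sketch: a rough illustration of how the ``read``/``write`` methods of
# NDIOMixin reach the unified I/O registry.  The format name 'simple-txt' and
# the reader/writer functions are hypothetical examples; only the
# register_reader/register_writer calls and the delegation to ``io_registry``
# follow the public astropy.io.registry API used by the mixin above.

import numpy as np
from astropy.io import registry as io_registry
from astropy.nddata import NDData, NDIOMixin


class MyNDData(NDIOMixin, NDData):
    """NDData subclass that gains ``read`` and ``write`` from NDIOMixin."""


def _read_simple_txt(filename):
    # Hypothetical reader: one value per line.
    return MyNDData(np.loadtxt(filename))


def _write_simple_txt(ndd, filename):
    # Hypothetical writer counterpart.
    np.savetxt(filename, ndd.data)


io_registry.register_reader('simple-txt', MyNDData, _read_simple_txt)
io_registry.register_writer('simple-txt', MyNDData, _write_simple_txt)

# MyNDData([1, 2, 3]).write('values.txt', format='simple-txt')
# ndd = MyNDData.read('values.txt', format='simple-txt')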
3891e8b863283806ac7ee6ce7dcd5b71185d261547360198c9e857ff2ad09884
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This module implements the base CCDData class. import textwrap import numpy as np import pytest from ...io import fits from ..nduncertainty import StdDevUncertainty, MissingDataAssociationException from ... import units as u from ... import log from ...wcs import WCS, FITSFixedWarning from ...tests.helper import catch_warnings from ...utils import NumpyRNGContext from ...utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from ..ccddata import CCDData # If additional pytest markers are defined the key in the dictionary below # should be the name of the marker. DEFAULTS = { 'seed': 123, 'data_size': 100, 'data_scale': 1.0, 'data_mean': 0.0 } DEFAULT_SEED = 123 DEFAULT_DATA_SIZE = 100 DEFAULT_DATA_SCALE = 1.0 def value_from_markers(key, request): try: val = request.keywords[key].args[0] except KeyError: val = DEFAULTS[key] return val @pytest.fixture def ccd_data(request): """ Return a CCDData object with units of ADU. The size of the data array is 100x100 but can be changed using the marker @pytest.mark.data_size(N) on the test function, where N should be the desired dimension. Data values are initialized to random numbers drawn from a normal distribution with mean of 0 and scale 1. The scale can be changed with the marker @pytest.marker.scale(s) on the test function, where s is the desired scale. The mean can be changed with the marker @pytest.marker.scale(m) on the test function, where m is the desired mean. """ size = value_from_markers('data_size', request) scale = value_from_markers('data_scale', request) mean = value_from_markers('data_mean', request) with NumpyRNGContext(DEFAULTS['seed']): data = np.random.normal(loc=mean, size=[size, size], scale=scale) fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([100, 100])) def test_ccddata_unit_cannot_be_set_to_none(ccd_data): with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc) @pytest.mark.data_size(10) def test_ccddata_simple(ccd_data): assert ccd_data.shape == (10, 10) assert ccd_data.size == 100 assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros((10, 10)), unit="electron") assert ccd.unit is u.electron @pytest.mark.data_size(10) def test_initialize_from_FITS(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (10, 10) assert cd.size == 100 assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) with catch_warnings(FITSFixedWarning) as w: ccd = CCDData.read(filename, unit='adu') assert len(w) == 0 # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.random.random(size=(100, 100)) fake_img2 = np.random.random(size=(100, 100)) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(ccd_data, tmpdir): ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(ccd_data, tmpdir): filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(ccd_data): key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(ccd_data): ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(ccd_data, tmpdir): hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(ccd_data): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert 
d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(ccd_data): with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(ccd_data): ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(ccd_data): with pytest.raises(ValueError): ccd_data.uncertainty = np.random.random(size=(3, 4)) def test_to_hdu(ccd_data): ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(ccd_data): ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def test_mult_div_overload(ccd_data, operand, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) @pytest.mark.data_unit(u.adu) def 
test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty, operation, affects_uncertainty): if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = ccd_data.__getattribute__(operation) np_method = np.__getattribute__(operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(ccd_data): with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_diff_smaller_3(first, second): return abs(first - second) <= 3 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2 def test_arithmetic_with_wcs_compare_fail(): def return_diff_smaller_1(first, second): return abs(first - second) <= 1 ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs def test_arithmetic_overload_ccddata_operand(ccd_data): ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_equal(result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array) result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * 
ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.random.random(size=(100, 100)) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(ccd_data, tmpdir): tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(ccd_data, tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') ccd = CCDData.read(data_file1, unit='count') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removall works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header """ from ..ccddata import _generate_wcs_and_update_header from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr: continue header_string = get_pkg_data_contents(hdr) wcs = WCS(header_string) header = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) # Make sure all of the WCS-related keywords have been removed. 
assert not (set(new_header) & set(new_wcs.to_header(relax=True)) - keepers) # Check that the new wcs is the same as the old. new_wcs_header = new_wcs.to_header(relax=True) for k, v in new_wcs_header.items(): if isinstance(v, str): assert header[k] == v else: np.testing.assert_almost_equal(header[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') with catch_warnings(FITSFixedWarning): hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(ccd_data): a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(ccd_data): ccd_data.wcs = 5 result = ccd_data.multiply(1.0) assert result.wcs == 5 @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.wcs = 5 method = ccd_data.__getattribute__(operation) result = method(ccd_data2) assert result.wcs == ccd_data.wcs assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(ccd_data, operation): ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = ccd_data.__getattribute__(operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(ccd_data, tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_wcs(ccd_data): ccd_data.wcs = 5 assert ccd_data.wcs == 5 def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir): # These are the extensions that are supposed to be supported. 
supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join("test.{}".format(ext)) ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None
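# Usage sketch: a condensed illustration of the CCDData round-trip behaviour
# exercised by the tests above.  Mask and uncertainty are written to FITS
# extensions by default and can be given custom extension names (the 'Fun' /
# 'NoFun' names mirror the test above); the file names here are illustrative
# only and the default extension names may depend on the astropy version.

import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.ones((10, 10)), unit=u.adu)
ccd.mask = ccd.data > 0.5
ccd.uncertainty = StdDevUncertainty(0.1 * ccd.data)

# Default extension names: mask and uncertainty are saved and loaded again.
ccd.write('ccd_default.fits', overwrite=True)
roundtrip = CCDData.read('ccd_default.fits')
assert roundtrip.mask is not None and roundtrip.uncertainty is not None

# Custom extension names must be repeated when reading, as in the tests above.
ccd.write('ccd_custom.fits', hdu_mask='Fun', hdu_uncertainty='NoFun',
          overwrite=True)
back = CCDData.read('ccd_custom.fits', hdu_mask='Fun', hdu_uncertainty='NoFun')
assert back.mask is not None and back.uncertainty is not None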
0cc2878d90478ac3e054bee1d6f7cb087298ce9786583a8e70b750c68b25ec79
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Tests of NDDataBase

from ..nddata_base import NDDataBase


class MinimalSubclass(NDDataBase):

    def __init__(self):
        super().__init__()

    @property
    def data(self):
        return None

    @property
    def mask(self):
        return super().mask

    @property
    def unit(self):
        return super().unit

    @property
    def wcs(self):
        return super().wcs

    @property
    def meta(self):
        return super().meta

    @property
    def uncertainty(self):
        return super().uncertainty


def test_nddata_base_subclass():
    a = MinimalSubclass()
    assert a.meta is None
    assert a.data is None
    assert a.mask is None
    assert a.unit is None
    assert a.wcs is None
    assert a.uncertainty is None
16cb0915b956210e0f0c4d1a75d1743343232231dd2981e658d355c65b565e28
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_allclose from ...tests.helper import assert_quantity_allclose from ..utils import (extract_array, add_array, subpixel_indices, block_reduce, block_replicate, overlap_slices, NoOverlapError, PartialOverlapError, Cutout2D) from ...wcs import WCS from ...coordinates import SkyCoord from ... import units as u try: import skimage # pylint: disable=W0611 HAS_SKIMAGE = True except ImportError: HAS_SKIMAGE = False test_positions = [(10.52, 3.12), (5.62, 12.97), (31.33, 31.77), (0.46, 0.94), (20.45, 12.12), (42.24, 24.42)] test_position_indices = [(0, 3), (0, 2), (4, 1), (4, 2), (4, 3), (3, 4)] test_slices = [slice(10.52, 3.12), slice(5.62, 12.97), slice(31.33, 31.77), slice(0.46, 0.94), slice(20.45, 12.12), slice(42.24, 24.42)] subsampling = 5 test_pos_bad = [(-1, -4), (-1, 0), (6, 2), (6, 6)] def test_slices_different_dim(): '''Overlap from arrays with different number of dim is undefined.''' with pytest.raises(ValueError) as e: overlap_slices((4, 5, 6), (1, 2), (0, 0)) assert "the same number of dimensions" in str(e.value) def test_slices_pos_different_dim(): '''Position must have same dim as arrays.''' with pytest.raises(ValueError) as e: overlap_slices((4, 5), (1, 2), (0, 0, 3)) assert "the same number of dimensions" in str(e.value) @pytest.mark.parametrize('pos', test_pos_bad) def test_slices_no_overlap(pos): '''If there is no overlap between arrays, an error should be raised.''' with pytest.raises(NoOverlapError): overlap_slices((5, 5), (2, 2), pos) def test_slices_partial_overlap(): '''Compute a slice for partially overlapping arrays.''' temp = overlap_slices((5,), (3,), (0,)) assert temp == ((slice(0, 2, None),), (slice(1, 3, None),)) temp = overlap_slices((5,), (3,), (0,), mode='partial') assert temp == ((slice(0, 2, None),), (slice(1, 3, None),)) for pos in [0, 4]: with pytest.raises(PartialOverlapError) as e: temp = overlap_slices((5,), (3,), (pos,), mode='strict') assert 'Arrays overlap only partially.' in str(e.value) def test_slices_overlap_wrong_mode(): '''Call overlap_slices with non-existing mode.''' with pytest.raises(ValueError) as e: overlap_slices((5,), (3,), (0,), mode='full') assert "Mode can be only" in str(e.value) def test_extract_array_wrong_mode(): '''Call extract_array with non-existing mode.''' with pytest.raises(ValueError) as e: extract_array(np.arange(4), (2, ), (0, ), mode='full') assert "Valid modes are 'partial', 'trim', and 'strict'." == str(e.value) def test_extract_array_1d_even(): '''Extract 1 d arrays. All dimensions are treated the same, so we can test in 1 dim. ''' assert np.all(extract_array(np.arange(4), (2, ), (0, ), fill_value=-99) == np.array([-99, 0])) for i in [1, 2, 3]: assert np.all(extract_array(np.arange(4), (2, ), (i, )) == np.array([i - 1, i])) assert np.all(extract_array(np.arange(4.), (2, ), (4, ), fill_value=np.inf) == np.array([3, np.inf])) def test_extract_array_1d_odd(): '''Extract 1 d arrays. All dimensions are treated the same, so we can test in 1 dim. The first few lines test the most error-prone part: Extraction of an array on the boundaries. Additional tests (e.g. dtype of return array) are done for the last case only. 
''' assert np.all(extract_array(np.arange(4), (3,), (-1, ), fill_value=-99) == np.array([-99, -99, 0])) assert np.all(extract_array(np.arange(4), (3,), (0, ), fill_value=-99) == np.array([-99, 0, 1])) for i in [1, 2]: assert np.all(extract_array(np.arange(4), (3,), (i, )) == np.array([i-1, i, i+1])) assert np.all(extract_array(np.arange(4), (3,), (3, ), fill_value=-99) == np.array([2, 3, -99])) arrayin = np.arange(4.) extracted = extract_array(arrayin, (3,), (4, )) assert extracted[0] == 3 assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan assert extracted.dtype == arrayin.dtype def test_extract_array_1d(): """In 1d, shape can be int instead of tuple""" assert np.all(extract_array(np.arange(4), 3, (-1, ), fill_value=-99) == np.array([-99, -99, 0])) assert np.all(extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0])) def test_extract_Array_float(): """integer is at bin center""" for a in np.arange(2.51, 3.49, 0.1): assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4])) def test_extract_array_1d_trim(): '''Extract 1 d arrays. All dimensions are treated the same, so we can test in 1 dim. ''' assert np.all(extract_array(np.arange(4), (2, ), (0, ), mode='trim') == np.array([0])) for i in [1, 2, 3]: assert np.all(extract_array(np.arange(4), (2, ), (i, ), mode='trim') == np.array([i - 1, i])) assert np.all(extract_array(np.arange(4.), (2, ), (4, ), mode='trim') == np.array([3])) @pytest.mark.parametrize('mode', ['partial', 'trim', 'strict']) def test_extract_array_easy(mode): """ Test extract_array utility function. Test by extracting an array of ones out of an array of zeros. """ large_test_array = np.zeros((11, 11)) small_test_array = np.ones((5, 5)) large_test_array[3:8, 3:8] = small_test_array extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode) assert np.all(extracted_array == small_test_array) def test_extract_array_return_pos(): '''Check that the return position is calculated correctly. The result will differ by mode. All test here are done in 1d because it's easier to construct correct test cases. ''' large_test_array = np.arange(5) for i in np.arange(-1, 6): extracted, new_pos = extract_array(large_test_array, 3, i, mode='partial', return_position=True) assert new_pos == (1, ) # Now check an array with an even number for i, expected in zip([1.49, 1.51, 3], [1.49, 0.51, 1]): extracted, new_pos = extract_array(large_test_array, (2,), (i,), mode='strict', return_position=True) assert new_pos == (expected, ) # For mode='trim' the answer actually depends for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)): extracted, new_pos = extract_array(large_test_array, (3,), (i,), mode='trim', return_position=True) assert new_pos == (expected, ) def test_add_array_odd_shape(): """ Test add_array utility function. Test by adding an array of ones out of an array of zeros. """ large_test_array = np.zeros((11, 11)) small_test_array = np.ones((5, 5)) large_test_array_ref = large_test_array.copy() large_test_array_ref[3:8, 3:8] += small_test_array added_array = add_array(large_test_array, small_test_array, (5, 5)) assert np.all(added_array == large_test_array_ref) def test_add_array_even_shape(): """ Test add_array_2D utility function. Test by adding an array of ones out of an array of zeros. 
""" large_test_array = np.zeros((11, 11)) small_test_array = np.ones((4, 4)) large_test_array_ref = large_test_array.copy() large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4] added_array = add_array(large_test_array, small_test_array, (0, 0)) assert np.all(added_array == large_test_array_ref) @pytest.mark.parametrize(('position', 'subpixel_index'), zip(test_positions, test_position_indices)) def test_subpixel_indices(position, subpixel_index): """ Test subpixel_indices utility function. Test by asserting that the function returns correct results for given test values. """ assert np.all(subpixel_indices(position, subsampling) == subpixel_index) @pytest.mark.skipif('not HAS_SKIMAGE') class TestBlockReduce: def test_1d(self): """Test 1D array.""" data = np.arange(4) expected = np.array([1, 5]) result = block_reduce(data, 2) assert np.all(result == expected) def test_1d_mean(self): """Test 1D array with func=np.mean.""" data = np.arange(4) block_size = 2. expected = block_reduce(data, block_size, func=np.sum) / block_size result_mean = block_reduce(data, block_size, func=np.mean) assert np.all(result_mean == expected) def test_2d(self): """Test 2D array.""" data = np.arange(4).reshape(2, 2) expected = np.array([[6]]) result = block_reduce(data, 2) assert np.all(result == expected) def test_2d_mean(self): """Test 2D array with func=np.mean.""" data = np.arange(4).reshape(2, 2) block_size = 2. expected = (block_reduce(data, block_size, func=np.sum) / block_size**2) result = block_reduce(data, block_size, func=np.mean) assert np.all(result == expected) def test_2d_trim(self): """ Test trimming of 2D array when size is not perfectly divisible by block_size. """ data1 = np.arange(15).reshape(5, 3) result1 = block_reduce(data1, 2) data2 = data1[0:4, 0:2] result2 = block_reduce(data2, 2) assert np.all(result1 == result2) def test_block_size_broadcasting(self): """Test scalar block_size broadcasting.""" data = np.arange(16).reshape(4, 4) result1 = block_reduce(data, 2) result2 = block_reduce(data, (2, 2)) assert np.all(result1 == result2) def test_block_size_len(self): """Test block_size length.""" data = np.ones((2, 2)) with pytest.raises(ValueError): block_reduce(data, (2, 2, 2)) @pytest.mark.skipif('not HAS_SKIMAGE') class TestBlockReplicate: def test_1d(self): """Test 1D array.""" data = np.arange(2) expected = np.array([0, 0, 0.5, 0.5]) result = block_replicate(data, 2) assert np.all(result == expected) def test_1d_conserve_sum(self): """Test 1D array with conserve_sum=False.""" data = np.arange(2) block_size = 2. expected = block_replicate(data, block_size) * block_size result = block_replicate(data, block_size, conserve_sum=False) assert np.all(result == expected) def test_2d(self): """Test 2D array.""" data = np.arange(2).reshape(2, 1) expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]]) result = block_replicate(data, 2) assert np.all(result == expected) def test_2d_conserve_sum(self): """Test 2D array with conserve_sum=False.""" data = np.arange(6).reshape(2, 3) block_size = 2. 
expected = block_replicate(data, block_size) * block_size**2 result = block_replicate(data, block_size, conserve_sum=False) assert np.all(result == expected) def test_block_size_broadcasting(self): """Test scalar block_size broadcasting.""" data = np.arange(4).reshape(2, 2) result1 = block_replicate(data, 2) result2 = block_replicate(data, (2, 2)) assert np.all(result1 == result2) def test_block_size_len(self): """Test block_size length.""" data = np.arange(5) with pytest.raises(ValueError): block_replicate(data, (2, 2)) class TestCutout2D: def setup_class(self): self.data = np.arange(20.).reshape(5, 4) self.position = SkyCoord('13h11m29.96s -01d19m18.7s', frame='icrs') wcs = WCS(naxis=2) rho = np.pi / 3. scale = 0.05 / 3600. wcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)], [scale*np.sin(rho), scale*np.cos(rho)]] wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] wcs.wcs.crval = [self.position.ra.to_value(u.deg), self.position.dec.to_value(u.deg)] wcs.wcs.crpix = [3, 3] self.wcs = wcs def test_cutout(self): sizes = [3, 3*u.pixel, (3, 3), (3*u.pixel, 3*u.pix), (3., 3*u.pixel), (2.9, 3.3)] for size in sizes: position = (2.1, 1.9) c = Cutout2D(self.data, position, size) assert c.data.shape == (3, 3) assert c.data[1, 1] == 10 assert c.origin_original == (1, 1) assert c.origin_cutout == (0, 0) assert c.input_position_original == position assert_allclose(c.input_position_cutout, (1.1, 0.9)) assert c.position_original == (2., 2.) assert c.position_cutout == (1., 1.) assert c.center_original == (2., 2.) assert c.center_cutout == (1., 1.) assert c.bbox_original == ((1, 3), (1, 3)) assert c.bbox_cutout == ((0, 2), (0, 2)) assert c.slices_original == (slice(1, 4), slice(1, 4)) assert c.slices_cutout == (slice(0, 3), slice(0, 3)) def test_size_length(self): with pytest.raises(ValueError): Cutout2D(self.data, (2, 2), (1, 1, 1)) def test_size_units(self): for size in [3 * u.cm, (3, 3 * u.K)]: with pytest.raises(ValueError): Cutout2D(self.data, (2, 2), size) def test_size_pixel(self): """ Check size in derived pixel units. """ size = 0.3*u.arcsec / (0.1*u.arcsec/u.pixel) c = Cutout2D(self.data, (2, 2), size) assert c.data.shape == (3, 3) assert c.data[0, 0] == 5 assert c.slices_original == (slice(1, 4), slice(1, 4)) assert c.slices_cutout == (slice(0, 3), slice(0, 3)) def test_size_angle(self): c = Cutout2D(self.data, (2, 2), (0.1*u.arcsec), wcs=self.wcs) assert c.data.shape == (2, 2) assert c.data[0, 0] == 5 assert c.slices_original == (slice(1, 3), slice(1, 3)) assert c.slices_cutout == (slice(0, 2), slice(0, 2)) def test_size_angle_without_wcs(self): with pytest.raises(ValueError): Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec)) def test_cutout_trim_overlap(self): c = Cutout2D(self.data, (0, 0), (3, 3), mode='trim') assert c.data.shape == (2, 2) assert c.data[0, 0] == 0 assert c.slices_original == (slice(0, 2), slice(0, 2)) assert c.slices_cutout == (slice(0, 2), slice(0, 2)) def test_cutout_partial_overlap(self): c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial') assert c.data.shape == (3, 3) assert c.data[1, 1] == 0 assert c.slices_original == (slice(0, 2), slice(0, 2)) assert c.slices_cutout == (slice(1, 3), slice(1, 3)) def test_cutout_partial_overlap_fill_value(self): fill_value = -99 c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial', fill_value=fill_value) assert c.data.shape == (3, 3) assert c.data[1, 1] == 0 assert c.data[0, 0] == fill_value def test_copy(self): data = np.copy(self.data) c = Cutout2D(data, (2, 3), (3, 3)) xy = (0, 0) value = 100. 
c.data[xy] = value xy_orig = c.to_original_position(xy) yx = xy_orig[::-1] assert data[yx] == value data = np.copy(self.data) c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True) c2.data[xy] = value assert data[yx] != value def test_to_from_large(self): position = (2, 2) c = Cutout2D(self.data, position, (3, 3)) xy = (0, 0) result = c.to_cutout_position(c.to_original_position(xy)) assert_allclose(result, xy) def test_skycoord_without_wcs(self): with pytest.raises(ValueError): Cutout2D(self.data, self.position, (3, 3)) def test_skycoord(self): c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs) skycoord_original = self.position.from_pixel(c.center_original[1], c.center_original[0], self.wcs) skycoord_cutout = self.position.from_pixel(c.center_cutout[1], c.center_cutout[0], c.wcs) assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra) assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec) def test_skycoord_partial(self): c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs, mode='partial') skycoord_original = self.position.from_pixel(c.center_original[1], c.center_original[0], self.wcs) skycoord_cutout = self.position.from_pixel(c.center_cutout[1], c.center_cutout[0], c.wcs) assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra) assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
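# Usage sketch: a small illustration of the overlap/extraction helpers tested
# above, using only calls that appear in the tests: ``extract_array`` with the
# 'partial' mode and ``Cutout2D`` with a pixel position and size.  The data
# values are arbitrary.

import numpy as np
from astropy.nddata.utils import Cutout2D, extract_array

data = np.arange(20.).reshape(5, 4)

# Extract a 3x3 region centred near the array edge; pixels falling outside the
# array are filled with the given fill value in 'partial' mode.
small = extract_array(data, (3, 3), (0, 0), mode='partial', fill_value=-99)
print(small)

# Cutout2D keeps track of how cutout pixels map back to the original array.
cut = Cutout2D(data, position=(2.1, 1.9), size=(3, 3))
print(cut.data.shape)            # (3, 3)
print(cut.slices_original)       # slices into the original array
print(cut.to_original_position((0, 0)))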
08ba7c544a79ee02e207efa294cd20ce99ec9649e44d642fe9b3113d2838a028
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from .. import FlagCollection


def test_init():
    FlagCollection(shape=(1, 2, 3))


def test_init_noshape():
    with pytest.raises(Exception) as exc:
        FlagCollection()
    assert exc.value.args[0] == ('FlagCollection should be initialized with '
                                 'the shape of the data')


def test_init_notiterable():
    with pytest.raises(Exception) as exc:
        FlagCollection(shape=1.)
    assert exc.value.args[0] == ('FlagCollection shape should be '
                                 'an iterable object')


def test_setitem():
    f = FlagCollection(shape=(1, 2, 3))
    f['a'] = np.ones((1, 2, 3)).astype(float)
    f['b'] = np.ones((1, 2, 3)).astype(int)
    f['c'] = np.ones((1, 2, 3)).astype(bool)
    f['d'] = np.ones((1, 2, 3)).astype(str)


@pytest.mark.parametrize(('value'), [1, 1., 'spam', [1, 2, 3], (1., 2., 3.)])
def test_setitem_invalid_type(value):
    f = FlagCollection(shape=(1, 2, 3))
    with pytest.raises(Exception) as exc:
        f['a'] = value
    assert exc.value.args[0] == 'flags should be given as a Numpy array'


def test_setitem_invalid_shape():
    f = FlagCollection(shape=(1, 2, 3))
    with pytest.raises(ValueError) as exc:
        f['a'] = np.ones((3, 2, 1))
    assert exc.value.args[0].startswith('flags array shape')
    assert exc.value.args[0].endswith('does not match data shape (1, 2, 3)')
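# Usage sketch: a minimal illustration of FlagCollection as exercised by the
# tests above: a dict-like container whose values must be numpy arrays with
# the declared data shape.  The flag names used here are arbitrary examples.

import numpy as np
from astropy.nddata import FlagCollection

flags = FlagCollection(shape=(2, 3))
flags['saturated'] = np.zeros((2, 3), dtype=bool)
flags['quality'] = np.ones((2, 3), dtype=int)

# Assigning an array with the wrong shape raises ValueError, as tested above.
try:
    flags['bad'] = np.zeros((3, 2))
except ValueError as err:
    print(err)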
b103e89af29dfd9a0b08635457a256779dedeb9088bf697562fede8b1a6f7b28
# Licensed under a 3-clause BSD style license - see LICENSE.rst import textwrap from collections import OrderedDict import pytest import numpy as np from numpy.testing import assert_array_equal from ..nddata import NDData from ..nduncertainty import NDUncertainty, StdDevUncertainty from ... import units as u from ...utils import NumpyRNGContext class FakeUncertainty(NDUncertainty): @property def uncertainty_type(self): return 'fake' def _propagate_add(self, data, final_data): pass def _propagate_subtract(self, data, final_data): pass def _propagate_multiply(self, data, final_data): pass def _propagate_divide(self, data, final_data): pass class FakeNumpyArray: """ Class that has a few of the attributes of a numpy array. These attributes are checked for by NDData. """ def __init__(self): super().__init__() def shape(self): pass def __getitem__(self): pass def __array__(self): pass @property def dtype(self): return 'fake' class MinimalUncertainty: """ Define the minimum attributes acceptable as an uncertainty object. """ def __init__(self, value): self._uncertainty = value @property def uncertainty_type(self): return "totally and completely fake" class BadNDDataSubclass(NDData): def __init__(self, data, uncertainty=None, mask=None, wcs=None, meta=None, unit=None): self._data = data self._uncertainty = uncertainty self._mask = mask self._wcs = wcs self._unit = unit self._meta = meta # Setter tests def test_uncertainty_setter(): nd = NDData([1, 2, 3]) good_uncertainty = MinimalUncertainty(5) nd.uncertainty = good_uncertainty assert nd.uncertainty is good_uncertainty # Check the fake uncertainty (minimal does not work since it has no # parent_nddata attribute from NDUncertainty) nd.uncertainty = FakeUncertainty(5) assert nd.uncertainty.parent_nddata is nd # Check that it works if the uncertainty was set during init nd = NDData(nd) assert isinstance(nd.uncertainty, FakeUncertainty) nd.uncertainty = 10 assert not isinstance(nd.uncertainty, FakeUncertainty) assert nd.uncertainty.array == 10 def test_mask_setter(): # Since it just changes the _mask attribute everything should work nd = NDData([1, 2, 3]) nd.mask = True assert nd.mask nd.mask = False assert not nd.mask # Check that it replaces a mask from init nd = NDData(nd, mask=True) assert nd.mask nd.mask = False assert not nd.mask # Init tests def test_nddata_empty(): with pytest.raises(TypeError): NDData() # empty initializer should fail def test_nddata_init_data_nonarray(): inp = [1, 2, 3] nd = NDData(inp) assert (np.array(inp) == nd.data).all() def test_nddata_init_data_ndarray(): # random floats with NumpyRNGContext(123): nd = NDData(np.random.random((10, 10))) assert nd.data.shape == (10, 10) assert nd.data.size == 100 assert nd.data.dtype == np.dtype(float) # specific integers nd = NDData(np.array([[1, 2, 3], [4, 5, 6]])) assert nd.data.size == 6 assert nd.data.dtype == np.dtype(int) # Tests to ensure that creating a new NDData object copies by *reference*. 
a = np.ones((10, 10)) nd_ref = NDData(a) a[0, 0] = 0 assert nd_ref.data[0, 0] == 0 # Except we choose copy=True a = np.ones((10, 10)) nd_ref = NDData(a, copy=True) a[0, 0] = 0 assert nd_ref.data[0, 0] != 0 def test_nddata_init_data_maskedarray(): with NumpyRNGContext(456): NDData(np.random.random((10, 10)), mask=np.random.random((10, 10)) > 0.5) # Another test (just copied here) with NumpyRNGContext(12345): a = np.random.randn(100) marr = np.ma.masked_where(a > 0, a) nd = NDData(marr) # check that masks and data match assert_array_equal(nd.mask, marr.mask) assert_array_equal(nd.data, marr.data) # check that they are both by reference marr.mask[10] = ~marr.mask[10] marr.data[11] = 123456789 assert_array_equal(nd.mask, marr.mask) assert_array_equal(nd.data, marr.data) # or not if we choose copy=True nd = NDData(marr, copy=True) marr.mask[10] = ~marr.mask[10] marr.data[11] = 0 assert nd.mask[10] != marr.mask[10] assert nd.data[11] != marr.data[11] @pytest.mark.parametrize('data', [np.array([1, 2, 3]), 5]) def test_nddata_init_data_quantity(data): # Test an array and a scalar because a scalar Quantity does not always # behaves the same way as an array. quantity = data * u.adu ndd = NDData(quantity) assert ndd.unit == quantity.unit assert_array_equal(ndd.data, np.array(quantity.value)) if ndd.data.size > 1: # check that if it is an array it is not copied quantity.value[1] = 100 assert ndd.data[1] == quantity.value[1] # or is copied if we choose copy=True ndd = NDData(quantity, copy=True) quantity.value[1] = 5 assert ndd.data[1] != quantity.value[1] def test_nddata_init_data_masked_quantity(): a = np.array([2, 3]) q = a * u.m m = False mq = np.ma.array(q, mask=m) nd = NDData(mq) assert_array_equal(nd.data, a) # This test failed before the change in nddata init because the masked # arrays data (which in fact was a quantity was directly saved) assert nd.unit == u.m assert not isinstance(nd.data, u.Quantity) np.testing.assert_array_equal(nd.mask, np.array(m)) def test_nddata_init_data_nddata(): nd1 = NDData(np.array([1])) nd2 = NDData(nd1) assert nd2.wcs == nd1.wcs assert nd2.uncertainty == nd1.uncertainty assert nd2.mask == nd1.mask assert nd2.unit == nd1.unit assert nd2.meta == nd1.meta # Check that it is copied by reference nd1 = NDData(np.ones((5, 5))) nd2 = NDData(nd1) assert nd1.data is nd2.data # Check that it is really copied if copy=True nd2 = NDData(nd1, copy=True) nd1.data[2, 3] = 10 assert nd1.data[2, 3] != nd2.data[2, 3] # Now let's see what happens if we have all explicitly set nd1 = NDData(np.array([1]), mask=False, uncertainty=10, unit=u.s, meta={'dest': 'mordor'}, wcs=10) nd2 = NDData(nd1) assert nd2.data is nd1.data assert nd2.wcs == nd1.wcs assert nd2.uncertainty.array == nd1.uncertainty.array assert nd2.mask == nd1.mask assert nd2.unit == nd1.unit assert nd2.meta == nd1.meta # now what happens if we overwrite them all too nd3 = NDData(nd1, mask=True, uncertainty=200, unit=u.km, meta={'observer': 'ME'}, wcs=4) assert nd3.data is nd1.data assert nd3.wcs != nd1.wcs assert nd3.uncertainty.array != nd1.uncertainty.array assert nd3.mask != nd1.mask assert nd3.unit != nd1.unit assert nd3.meta != nd1.meta def test_nddata_init_data_nddata_subclass(): # There might be some incompatible subclasses of NDData around. 
bnd = BadNDDataSubclass(False, True, 3, 2, 'gollum', 100) # Before changing the NDData init this would not have raised an error but # would have lead to a compromised nddata instance with pytest.raises(TypeError): NDData(bnd) # but if it has no actual incompatible attributes it passes bnd_good = BadNDDataSubclass(np.array([1, 2]), True, 3, 2, {'enemy': 'black knight'}, u.km) nd = NDData(bnd_good) assert nd.unit == bnd_good.unit assert nd.meta == bnd_good.meta assert nd.uncertainty.array == bnd_good.uncertainty assert nd.mask == bnd_good.mask assert nd.wcs == bnd_good.wcs assert nd.data is bnd_good.data def test_nddata_init_data_fail(): # First one is sliceable but has no shape, so should fail. with pytest.raises(TypeError): NDData({'a': 'dict'}) # This has a shape but is not sliceable class Shape: def __init__(self): self.shape = 5 def __repr__(self): return '7' with pytest.raises(TypeError): NDData(Shape()) def test_nddata_init_data_fakes(): ndd1 = NDData(FakeNumpyArray()) # First make sure that NDData isn't converting its data to a numpy array. assert isinstance(ndd1.data, FakeNumpyArray) # Make a new NDData initialized from an NDData ndd2 = NDData(ndd1) # Check that the data wasn't converted to numpy assert isinstance(ndd2.data, FakeNumpyArray) # Specific parameters def test_param_uncertainty(): u = StdDevUncertainty(array=np.ones((5, 5))) d = NDData(np.ones((5, 5)), uncertainty=u) # Test that the parent_nddata is set. assert d.uncertainty.parent_nddata is d # Test conflicting uncertainties (other NDData) u2 = StdDevUncertainty(array=np.ones((5, 5))*2) d2 = NDData(d, uncertainty=u2) assert d2.uncertainty is u2 assert d2.uncertainty.parent_nddata is d2 def test_param_wcs(): # Since everything is allowed we only need to test something nd = NDData([1], wcs=3) assert nd.wcs == 3 # Test conflicting wcs (other NDData) nd2 = NDData(nd, wcs=2) assert nd2.wcs == 2 def test_param_meta(): # everything dict-like is allowed with pytest.raises(TypeError): NDData([1], meta=3) nd = NDData([1, 2, 3], meta={}) assert len(nd.meta) == 0 nd = NDData([1, 2, 3]) assert isinstance(nd.meta, OrderedDict) assert len(nd.meta) == 0 # Test conflicting meta (other NDData) nd2 = NDData(nd, meta={'image': 'sun'}) assert len(nd2.meta) == 1 nd3 = NDData(nd2, meta={'image': 'moon'}) assert len(nd3.meta) == 1 assert nd3.meta['image'] == 'moon' def test_param_mask(): # Since everything is allowed we only need to test something nd = NDData([1], mask=False) assert not nd.mask # Test conflicting mask (other NDData) nd2 = NDData(nd, mask=True) assert nd2.mask # (masked array) nd3 = NDData(np.ma.array([1], mask=False), mask=True) assert nd3.mask # (masked quantity) mq = np.ma.array(np.array([2, 3])*u.m, mask=False) nd4 = NDData(mq, mask=True) assert nd4.mask def test_param_unit(): with pytest.raises(ValueError): NDData(np.ones((5, 5)), unit="NotAValidUnit") NDData([1, 2, 3], unit='meter') # Test conflicting units (quantity as data) q = np.array([1, 2, 3]) * u.m nd = NDData(q, unit='cm') assert nd.unit != q.unit assert nd.unit == u.cm # (masked quantity) mq = np.ma.array(np.array([2, 3])*u.m, mask=False) nd2 = NDData(mq, unit=u.s) assert nd2.unit == u.s # (another NDData as data) nd3 = NDData(nd, unit='km') assert nd3.unit == u.km # Check that the meta descriptor is working as expected. The MetaBaseTest class # takes care of defining all the tests, and we simply have to define the class # and any minimal set of args to pass. 
from ...utils.tests.test_metadata import MetaBaseTest


class TestMetaNDData(MetaBaseTest):
    test_class = NDData
    args = np.array([[1.]])


# Representation tests
def test_nddata_str():
    arr1d = NDData(np.array([1, 2, 3]))
    assert str(arr1d) == '[1 2 3]'

    arr2d = NDData(np.array([[1, 2], [3, 4]]))
    assert str(arr2d) == textwrap.dedent("""
        [[1 2]
         [3 4]]"""[1:])

    arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
    assert str(arr3d) == textwrap.dedent("""
        [[[1 2]
          [3 4]]

         [[5 6]
          [7 8]]]"""[1:])


def test_nddata_repr():
    arr1d = NDData(np.array([1, 2, 3]))
    assert repr(arr1d) == 'NDData([1, 2, 3])'

    arr2d = NDData(np.array([[1, 2], [3, 4]]))
    assert repr(arr2d) == textwrap.dedent("""
        NDData([[1, 2],
                [3, 4]])"""[1:])

    arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
    assert repr(arr3d) == textwrap.dedent("""
        NDData([[[1, 2],
                 [3, 4]],

                [[5, 6],
                 [7, 8]]])"""[1:])


# Not supported features
def test_slicing_not_supported():
    ndd = NDData(np.ones((5, 5)))
    with pytest.raises(TypeError):
        ndd[0]


def test_arithmetic_not_supported():
    ndd = NDData(np.ones((5, 5)))
    with pytest.raises(TypeError):
        ndd + ndd
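
# Editor-added hedged sketch, not part of the original module: the setter and
# init tests above show that a plain value passed as ``uncertainty`` is
# wrapped in an object exposing an ``array`` attribute; this restates that
# behaviour in one place.
def test_uncertainty_wrapping_sketch():
    nd = NDData([1, 2, 3], uncertainty=5)
    # The wrapper keeps the raw value reachable via ``array``, as the
    # uncertainty setter test relies on.
    assert nd.uncertainty.array == 5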
aff2afb3cc50e5139d4821016328fb8d9f6d3f76cab90dfdce93d4376a86d296
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# This module contains tests of a class equivalent to pre-1.0 NDData.

import pytest
import numpy as np

from ..nddata import NDData
from ..compat import NDDataArray
from ..nduncertainty import StdDevUncertainty
from ... import units as u

NDDATA_ATTRIBUTES = ['mask', 'flags', 'uncertainty', 'unit', 'shape', 'size',
                     'dtype', 'ndim', 'wcs', 'convert_unit_to']


def test_nddataarray_has_attributes_of_old_nddata():
    ndd = NDDataArray([1, 2, 3])
    for attr in NDDATA_ATTRIBUTES:
        assert hasattr(ndd, attr)


def test_nddata_simple():
    nd = NDDataArray(np.zeros((10, 10)))
    assert nd.shape == (10, 10)
    assert nd.size == 100
    assert nd.dtype == np.dtype(float)


def test_nddata_parameters():
    # Test for issue 4620
    nd = NDDataArray(data=np.zeros((10, 10)))
    assert nd.shape == (10, 10)
    assert nd.size == 100
    assert nd.dtype == np.dtype(float)

    # Change order; `data` has to be given explicitly here
    nd = NDDataArray(meta={}, data=np.zeros((10, 10)))
    assert nd.shape == (10, 10)
    assert nd.size == 100
    assert nd.dtype == np.dtype(float)

    # Pass uncertainty as second implicit argument
    data = np.zeros((10, 10))
    uncertainty = StdDevUncertainty(0.1 + np.zeros_like(data))
    nd = NDDataArray(data, uncertainty)
    assert nd.shape == (10, 10)
    assert nd.size == 100
    assert nd.dtype == np.dtype(float)
    assert nd.uncertainty == uncertainty


def test_nddata_conversion():
    nd = NDDataArray(np.array([[1, 2, 3], [4, 5, 6]]))
    assert nd.size == 6
    assert nd.dtype == np.dtype(int)


@pytest.mark.parametrize('flags_in', [
    np.array([True, False]),
    np.array([1, 0]),
    [True, False],
    [1, 0],
    np.array(['a', 'b']),
    ['a', 'b']])
def test_nddata_flags_init_without_np_array(flags_in):
    ndd = NDDataArray([1, 1], flags=flags_in)
    assert (ndd.flags == flags_in).all()


@pytest.mark.parametrize(('shape'), [(10,), (5, 5), (3, 10, 10)])
def test_nddata_flags_invalid_shape(shape):
    with pytest.raises(ValueError) as exc:
        NDDataArray(np.zeros((10, 10)), flags=np.ones(shape))
    assert exc.value.args[0] == 'dimensions of flags do not match data'


def test_convert_unit_to():
    # convert_unit_to should return a copy of its input
    d = NDDataArray(np.ones((5, 5)))
    d.unit = 'km'
    d.uncertainty = StdDevUncertainty(0.1 + np.zeros_like(d))
    # workaround because zeros_like does not support dtype arg until v1.6
    # and NDData accepts only bool ndarray as mask
    tmp = np.zeros_like(d.data)
    d.mask = np.array(tmp, dtype=bool)
    d1 = d.convert_unit_to('m')
    assert np.all(d1.data == np.array(1000.0))
    assert np.all(d1.uncertainty.array == 1000.0 * d.uncertainty.array)
    assert d1.unit == u.m
    # changing the output mask should not change the original
    d1.mask[0, 0] = True
    assert d.mask[0, 0] != d1.mask[0, 0]

    d.flags = np.zeros_like(d.data)
    d1 = d.convert_unit_to('m')


# check that subclasses can require wcs and/or unit to be present and use
# _arithmetic and convert_unit_to
class SubNDData(NDDataArray):
    """
    Subclass for test initialization of subclasses in NDData._arithmetic and
    NDData.convert_unit_to
    """
    def __init__(self, *arg, **kwd):
        super().__init__(*arg, **kwd)
        if self.unit is None:
            raise ValueError("Unit for subclass must be specified")
        if self.wcs is None:
            raise ValueError("WCS for subclass must be specified")


def test_init_of_subclass_in_convert_unit_to():
    data = np.ones([10, 10])
    arr1 = SubNDData(data, unit='m', wcs=5)
    result = arr1.convert_unit_to('km')
    np.testing.assert_array_equal(arr1.data, 1000 * result.data)


# Test for issue #4129:
def test_nddataarray_from_nddataarray():
    ndd1 = NDDataArray([1., 4., 9.],
                       uncertainty=StdDevUncertainty([1., 2., 3.]),
                       flags=[0, 1, 0])
    ndd2 = NDDataArray(ndd1)
    # Test that the 2 instances point to the same objects and aren't just
    # equal; this is explicitly documented for the main data array and we
    # probably want to catch any future change in behaviour for the other
    # attributes too and ensure they are intentional.
    assert ndd2.data is ndd1.data
    assert ndd2.uncertainty is ndd1.uncertainty
    assert ndd2.flags is ndd1.flags
    assert ndd2.meta == ndd1.meta


# Test for issue #4137:
def test_nddataarray_from_nddata():
    ndd1 = NDData([1., 4., 9.],
                  uncertainty=StdDevUncertainty([1., 2., 3.]))
    ndd2 = NDDataArray(ndd1)

    assert ndd2.data is ndd1.data
    assert ndd2.uncertainty is ndd1.uncertainty
    assert ndd2.meta == ndd1.meta
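
# Editor-added illustrative sketch, not part of the original module: the
# attribute list above only checks presence, so this assumes the natural
# behaviour that shape/ndim mirror the wrapped array and that a string unit
# is parsed into an astropy Unit.
def test_ndarray_like_attributes_sketch():
    ndd = NDDataArray(np.zeros((2, 3)), unit='adu')
    assert ndd.shape == (2, 3)
    assert ndd.ndim == 2
    assert ndd.unit == u.adu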
517a5064b8a4118d5fd34e73165abf378cc32dc615c03e8cc4d8ee81199f3101
# Licensed under a 3-clause BSD style license - see LICENSE.rst import inspect import pytest import numpy as np from ...tests.helper import catch_warnings from ...utils.exceptions import AstropyUserWarning from ... import units as u from ..nddata import NDData from ..decorators import support_nddata class CCDData(NDData): pass @support_nddata def wrapped_function_1(data, wcs=None, unit=None): return data, wcs, unit def test_pass_numpy(): data_in = np.array([1, 2, 3]) data_out, wcs_out, unit_out = wrapped_function_1(data=data_in) assert data_out is data_in assert wcs_out is None assert unit_out is None def test_pass_all_separate(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy data_out, wcs_out, unit_out = wrapped_function_1(data=data_in, wcs=wcs_in, unit=unit_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in def test_pass_nddata(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in) data_out, wcs_out, unit_out = wrapped_function_1(nddata_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in def test_pass_nddata_and_explicit(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy unit_in_alt = u.mJy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in) with catch_warnings() as w: data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in_alt assert len(w) == 1 assert str(w[0].message) == ("Property unit has been passed explicitly and as " "an NDData property, using explicitly specified value") def test_pass_nddata_ignored(): data_in = np.array([1, 2, 3]) wcs_in = "the wcs" unit_in = u.Jy nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0]) with catch_warnings() as w: data_out, wcs_out, unit_out = wrapped_function_1(nddata_in) assert data_out is data_in assert wcs_out is wcs_in assert unit_out is unit_in assert len(w) == 1 assert str(w[0].message) == ("The following attributes were set on the data " "object, but will be ignored by the function: mask") def test_incorrect_first_argument(): with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_2(something, wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_3(something, data, wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" with pytest.raises(ValueError) as exc: @support_nddata def wrapped_function_4(wcs=None, unit=None): pass assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`" def test_wrap_function_no_kwargs(): @support_nddata def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in def test_wrap_function_repack_valid(): @support_nddata(repack=True, returns=['data']) def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) nddata_in = NDData(data_in) nddata_out = wrapped_function_5(nddata_in, [1, 2, 3]) assert isinstance(nddata_out, NDData) assert nddata_out.data is data_in def test_wrap_function_accepts(): class MyData(NDData): pass @support_nddata(accepts=MyData) def wrapped_function_5(data, other_data): return data data_in = np.array([1, 2, 3]) 
nddata_in = NDData(data_in) mydata_in = MyData(data_in) assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in with pytest.raises(TypeError) as exc: wrapped_function_5(nddata_in, [1, 2, 3]) assert exc.value.args[0] == "Only NDData sub-classes that inherit from MyData can be used by this function" def test_wrap_preserve_signature_docstring(): @support_nddata def wrapped_function_6(data, wcs=None, unit=None): """ An awesome function """ pass if wrapped_function_6.__doc__ is not None: assert wrapped_function_6.__doc__.strip() == "An awesome function" signature = inspect.formatargspec( *inspect.getfullargspec(wrapped_function_6)) assert signature == "(data, wcs=None, unit=None)" def test_setup_failures1(): # repack but no returns with pytest.raises(ValueError): support_nddata(repack=True) def test_setup_failures2(): # returns but no repack with pytest.raises(ValueError): support_nddata(returns=['data']) def test_setup_failures9(): # keeps but no repack with pytest.raises(ValueError): support_nddata(keeps=['unit']) def test_setup_failures3(): # same attribute in keeps and returns with pytest.raises(ValueError): support_nddata(repack=True, keeps=['mask'], returns=['data', 'mask']) def test_setup_failures4(): # function accepts *args with pytest.raises(ValueError): @support_nddata def test(data, *args): pass def test_setup_failures10(): # function accepts **kwargs with pytest.raises(ValueError): @support_nddata def test(data, **kwargs): pass def test_setup_failures5(): # function accepts *args (or **kwargs) with pytest.raises(ValueError): @support_nddata def test(data, *args): pass def test_setup_failures6(): # First argument is not data with pytest.raises(ValueError): @support_nddata def test(img): pass def test_setup_failures7(): # accepts CCDData but was given just an NDData with pytest.raises(TypeError): @support_nddata(accepts=CCDData) def test(data): pass test(NDData(np.ones((3, 3)))) def test_setup_failures8(): # function returns a different amount of arguments than specified. Using # NDData here so we don't get into troubles when creating a CCDData without # unit! with pytest.raises(ValueError): @support_nddata(repack=True, returns=['data', 'mask']) def test(data): return 10 test(NDData(np.ones((3, 3)))) # do NOT use CCDData here. def test_setup_failures11(): # function accepts no arguments with pytest.raises(ValueError): @support_nddata def test(): pass def test_setup_numpyarray_default(): # It should be possible (even if it's not advisable to use mutable # defaults) to have a numpy array as default value. @support_nddata def func(data, wcs=np.array([1, 2, 3])): return wcs def test_still_accepts_other_input(): @support_nddata(repack=True, returns=['data']) def test(data): return data assert isinstance(test(NDData(np.ones((3, 3)))), NDData) assert isinstance(test(10), int) assert isinstance(test([1, 2, 3]), list) def test_accepting_property_normal(): # Accepts a mask attribute and takes it from the input @support_nddata def test(data, mask=None): return mask ndd = NDData(np.ones((3, 3))) assert test(ndd) is None ndd._mask = np.zeros((3, 3)) assert np.all(test(ndd) == 0) # Use the explicitly given one (raises a Warning) with catch_warnings(AstropyUserWarning) as w: assert test(ndd, mask=10) == 10 assert len(w) == 1 def test_parameter_default_identical_to_explicit_passed_argument(): # If the default is identical to the explicitly passed argument this # should still raise a Warning and use the explicit one. 
    @support_nddata
    def func(data, wcs=[1, 2, 3]):
        return wcs

    with catch_warnings(AstropyUserWarning) as w:
        assert func(NDData(1, wcs=[1, 2]), [1, 2, 3]) == [1, 2, 3]
        assert len(w) == 1

    with catch_warnings(AstropyUserWarning) as w:
        assert func(NDData(1, wcs=[1, 2])) == [1, 2]
        assert len(w) == 0


def test_accepting_property_notexist():
    # Accepts a flags attribute, but NDData doesn't have one
    @support_nddata
    def test(data, flags=10):
        return flags

    ndd = NDData(np.ones((3, 3)))
    test(ndd)


def test_accepting_property_translated():
    # The function accepts a ``masked`` argument and we want the decorator to
    # fill it from the ``mask`` attribute of the input.
    @support_nddata(mask='masked')
    def test(data, masked=None):
        return masked

    ndd = NDData(np.ones((3, 3)))
    assert test(ndd) is None
    ndd._mask = np.zeros((3, 3))
    assert np.all(test(ndd) == 0)
    # Use the explicitly given one (raises a Warning)
    with catch_warnings(AstropyUserWarning) as w:
        assert test(ndd, masked=10) == 10
        assert len(w) == 1


def test_accepting_property_meta_empty():
    # Meta is always set (OrderedDict) so it has a special case that it's
    # ignored if it's empty but not None
    @support_nddata
    def test(data, meta=None):
        return meta

    ndd = NDData(np.ones((3, 3)))
    assert test(ndd) is None
    ndd._meta = {'a': 10}
    assert test(ndd) == {'a': 10}
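
# Editor-added hedged sketch, not part of the original module, mirroring
# test_wrap_function_repack_valid above: with repack=True the values named in
# ``returns`` are packed back into the class of the NDData-like input.
def test_repack_usage_sketch():
    @support_nddata(repack=True, returns=['data'])
    def scale(data):
        return data * 2

    ndd_out = scale(NDData(np.arange(3)))
    assert isinstance(ndd_out, NDData)
    assert np.all(ndd_out.data == np.array([0, 2, 4]))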
a87fef9066aaeeccf468f027e1370a2697afb3d7cc401a6d61555fc50516b205
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_array_equal from ..nduncertainty import (StdDevUncertainty, NDUncertainty, IncompatibleUncertaintiesException, UnknownUncertainty) from ..nddata import NDData from ... import units as u # Regarding setter tests: # No need to test setters since the uncertainty is considered immutable after # creation except of the parent_nddata attribute and this accepts just # everything. # Additionally they should be covered by NDData, NDArithmeticMixin which rely # on it # Regarding propagate, _convert_uncert, _propagate_* tests: # They should be covered by NDArithmeticMixin since there is generally no need # to test them without this mixin. # Regarding __getitem__ tests: # Should be covered by NDSlicingMixin. # Regarding StdDevUncertainty tests: # This subclass only overrides the methods for propagation so the same # they should be covered in NDArithmeticMixin. # Not really fake but the minimum an uncertainty has to override not to be # abstract. class FakeUncertainty(NDUncertainty): @property def uncertainty_type(self): return 'fake' def _propagate_add(self, data, final_data): pass def _propagate_subtract(self, data, final_data): pass def _propagate_multiply(self, data, final_data): pass def _propagate_divide(self, data, final_data): pass # Test the fake (added also StdDevUncertainty which should behave identical) @pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty, UnknownUncertainty]) def test_init_fake_with_list(UncertClass): fake_uncert = UncertClass([1, 2, 3]) assert_array_equal(fake_uncert.array, np.array([1, 2, 3])) # Copy makes no difference since casting a list to an np.ndarray always # makes a copy. 
# But let's give the uncertainty a unit too fake_uncert = UncertClass([1, 2, 3], unit=u.adu) assert_array_equal(fake_uncert.array, np.array([1, 2, 3])) assert fake_uncert.unit is u.adu @pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty, UnknownUncertainty]) def test_init_fake_with_ndarray(UncertClass): uncert = np.arange(100).reshape(10, 10) fake_uncert = UncertClass(uncert) # Numpy Arrays are copied by default assert_array_equal(fake_uncert.array, uncert) assert fake_uncert.array is not uncert # Now try it without copy fake_uncert = UncertClass(uncert, copy=False) assert fake_uncert.array is uncert # let's provide a unit fake_uncert = UncertClass(uncert, unit=u.adu) assert_array_equal(fake_uncert.array, uncert) assert fake_uncert.array is not uncert assert fake_uncert.unit is u.adu @pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty, UnknownUncertainty]) def test_init_fake_with_quantity(UncertClass): uncert = np.arange(10).reshape(2, 5) * u.adu fake_uncert = UncertClass(uncert) # Numpy Arrays are copied by default assert_array_equal(fake_uncert.array, uncert.value) assert fake_uncert.array is not uncert.value assert fake_uncert.unit is u.adu # Try without copy (should not work, quantity.value always returns a copy) fake_uncert = UncertClass(uncert, copy=False) assert fake_uncert.array is not uncert.value assert fake_uncert.unit is u.adu # Now try with an explicit unit parameter too fake_uncert = UncertClass(uncert, unit=u.m) assert_array_equal(fake_uncert.array, uncert.value) # No conversion done assert fake_uncert.array is not uncert.value assert fake_uncert.unit is u.m # It took the explicit one @pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty, UnknownUncertainty]) def test_init_fake_with_fake(UncertClass): uncert = np.arange(5).reshape(5, 1) fake_uncert1 = UncertClass(uncert) fake_uncert2 = UncertClass(fake_uncert1) assert_array_equal(fake_uncert2.array, uncert) assert fake_uncert2.array is not uncert # Without making copies fake_uncert1 = UncertClass(uncert, copy=False) fake_uncert2 = UncertClass(fake_uncert1, copy=False) assert_array_equal(fake_uncert2.array, fake_uncert1.array) assert fake_uncert2.array is fake_uncert1.array # With a unit uncert = np.arange(5).reshape(5, 1) * u.adu fake_uncert1 = UncertClass(uncert) fake_uncert2 = UncertClass(fake_uncert1) assert_array_equal(fake_uncert2.array, uncert.value) assert fake_uncert2.array is not uncert.value assert fake_uncert2.unit is u.adu # With a unit and an explicit unit-parameter fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm) assert_array_equal(fake_uncert2.array, uncert.value) assert fake_uncert2.array is not uncert.value assert fake_uncert2.unit is u.cm @pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty, UnknownUncertainty]) def test_init_fake_with_somethingElse(UncertClass): # What about a dict? 
uncert = {'rdnoise': 2.9, 'gain': 0.6} fake_uncert = UncertClass(uncert) assert fake_uncert.array == uncert # We can pass a unit too but since we cannot do uncertainty propagation # the interpretation is up to the user fake_uncert = UncertClass(uncert, unit=u.s) assert fake_uncert.array == uncert assert fake_uncert.unit is u.s # So, now check what happens if copy is False fake_uncert = UncertClass(uncert, copy=False) assert fake_uncert.array == uncert assert id(fake_uncert) != id(uncert) # dicts cannot be referenced without copy # TODO : Find something that can be referenced without copy :-) def test_init_fake_with_StdDevUncertainty(): # Different instances of uncertainties are not directly convertible so this # should fail uncert = np.arange(5).reshape(5, 1) std_uncert = StdDevUncertainty(uncert) with pytest.raises(IncompatibleUncertaintiesException): FakeUncertainty(std_uncert) # Ok try it the other way around fake_uncert = FakeUncertainty(uncert) with pytest.raises(IncompatibleUncertaintiesException): StdDevUncertainty(fake_uncert) def test_uncertainty_type(): fake_uncert = FakeUncertainty([10, 2]) assert fake_uncert.uncertainty_type == 'fake' std_uncert = StdDevUncertainty([10, 2]) assert std_uncert.uncertainty_type == 'std' def test_uncertainty_correlated(): fake_uncert = FakeUncertainty([10, 2]) assert not fake_uncert.supports_correlated std_uncert = StdDevUncertainty([10, 2]) assert std_uncert.supports_correlated def test_for_leak_with_uncertainty(): # Regression test for memory leak because of cyclic references between # NDData and uncertainty from collections import defaultdict from gc import get_objects def test_leak(func, specific_objects=None): """Function based on gc.get_objects to determine if any object or a specific object leaks. It requires a function to be given and if any objects survive the function scope it's considered a leak (so don't return anything). """ before = defaultdict(int) for i in get_objects(): before[type(i)] += 1 func() after = defaultdict(int) for i in get_objects(): after[type(i)] += 1 if specific_objects is None: assert all(after[k] - before[k] == 0 for k in after) else: assert after[specific_objects] - before[specific_objects] == 0 def non_leaker_nddata(): # Without uncertainty there is no reason to assume that there is a # memory leak but test it nevertheless. NDData(np.ones(100)) def leaker_nddata(): # With uncertainty there was a memory leak! NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100))) test_leak(non_leaker_nddata, NDData) test_leak(leaker_nddata, NDData) # Same for NDDataArray: from ..compat import NDDataArray def non_leaker_nddataarray(): NDDataArray(np.ones(100)) def leaker_nddataarray(): NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100))) test_leak(non_leaker_nddataarray, NDDataArray) test_leak(leaker_nddataarray, NDDataArray) def test_for_stolen_uncertainty(): # Sharing uncertainties should not overwrite the parent_nddata attribute ndd1 = NDData(1, uncertainty=1) ndd2 = NDData(2, uncertainty=ndd1.uncertainty) # uncertainty.parent_nddata.data should be the original data! assert ndd1.uncertainty.parent_nddata.data == ndd1.data
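
# Editor-added illustrative sketch, not part of the original module: as in
# test_init_fake_with_quantity above, a Quantity handed to StdDevUncertainty
# is split into a plain value array and a separate unit.
def test_stddev_quantity_split_sketch():
    uncert = StdDevUncertainty(np.array([1., 2., 3.]) * u.adu)
    assert uncert.unit is u.adu
    assert_array_equal(uncert.array, np.array([1., 2., 3.]))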
675a6e0b2d51667a9aa3de38025e8fb2d81eb4c31c8accac2d6615756c0ca8f5
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_array_equal from ... import NDData, NDSlicingMixin from ...nduncertainty import NDUncertainty, StdDevUncertainty from .... import units as u # Just add the Mixin to NDData # TODO: Make this use NDDataRef instead! class NDDataSliceable(NDSlicingMixin, NDData): pass # Just some uncertainty (following the StdDevUncertainty implementation of # storing the uncertainty in a property 'array') with slicing. class SomeUncertainty(NDUncertainty): @property def uncertainty_type(self): return 'fake' def _propagate_add(self, data, final_data): pass def _propagate_subtract(self, data, final_data): pass def _propagate_multiply(self, data, final_data): pass def _propagate_divide(self, data, final_data): pass def test_slicing_only_data(): data = np.arange(10) nd = NDDataSliceable(data) nd2 = nd[2:5] assert_array_equal(data[2:5], nd2.data) def test_slicing_data_scalar_fail(): data = np.array(10) nd = NDDataSliceable(data) with pytest.raises(TypeError): # as exc nd[:] # assert exc.value.args[0] == 'Scalars cannot be sliced.' def test_slicing_1ddata_ndslice(): data = np.array([10, 20]) nd = NDDataSliceable(data) # Standard numpy warning here: with pytest.raises(IndexError): nd[:, :] @pytest.mark.parametrize('prop_name', ['mask', 'wcs', 'uncertainty']) def test_slicing_1dmask_ndslice(prop_name): # Data is 2d but mask only 1d so this should let the IndexError when # slicing the mask rise to the user. data = np.ones((3, 3)) kwarg = {prop_name: np.ones(3)} nd = NDDataSliceable(data, **kwarg) # Standard numpy warning here: with pytest.raises(IndexError): nd[:, :] def test_slicing_all_npndarray_1d(): data = np.arange(10) mask = data > 3 uncertainty = np.linspace(10, 20, 10) wcs = np.linspace(1, 1000, 10) # Just to have them too unit = u.s meta = {'observer': 'Brian'} nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs, unit=unit, meta=meta) nd2 = nd[2:5] assert_array_equal(data[2:5], nd2.data) assert_array_equal(mask[2:5], nd2.mask) assert_array_equal(uncertainty[2:5], nd2.uncertainty.array) assert_array_equal(wcs[2:5], nd2.wcs) assert unit is nd2.unit assert meta == nd.meta def test_slicing_all_npndarray_nd(): # See what happens for multidimensional properties data = np.arange(1000).reshape(10, 10, 10) mask = data > 3 uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10) wcs = np.linspace(1, 1000, 1000).reshape(10, 10, 10) nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs) # Slice only 1D nd2 = nd[2:5] assert_array_equal(data[2:5], nd2.data) assert_array_equal(mask[2:5], nd2.mask) assert_array_equal(uncertainty[2:5], nd2.uncertainty.array) assert_array_equal(wcs[2:5], nd2.wcs) # Slice 3D nd2 = nd[2:5, :, 4:7] assert_array_equal(data[2:5, :, 4:7], nd2.data) assert_array_equal(mask[2:5, :, 4:7], nd2.mask) assert_array_equal(uncertainty[2:5, :, 4:7], nd2.uncertainty.array) assert_array_equal(wcs[2:5, :, 4:7], nd2.wcs) def test_slicing_all_npndarray_shape_diff(): data = np.arange(10) mask = (data > 3)[0:9] uncertainty = np.linspace(10, 20, 15) wcs = np.linspace(1, 1000, 12) nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs) nd2 = nd[2:5] assert_array_equal(data[2:5], nd2.data) # All are sliced even if the shapes differ (no Info) assert_array_equal(mask[2:5], nd2.mask) assert_array_equal(uncertainty[2:5], nd2.uncertainty.array) assert_array_equal(wcs[2:5], nd2.wcs) def test_slicing_all_something_wrong(): data = np.arange(10) 
    mask = [False] * 10
    uncertainty = {'rdnoise': 2.9, 'gain': 1.4}
    wcs = 145 * u.degree
    nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
    nd2 = nd[2:5]
    # Sliced properties:
    assert_array_equal(data[2:5], nd2.data)
    assert_array_equal(mask[2:5], nd2.mask)
    # Attributes that cannot be sliced are kept unchanged (an INFO message is
    # emitted nevertheless)
    uncertainty is nd2.uncertainty
    assert_array_equal(wcs, nd2.wcs)


def test_boolean_slicing():
    data = np.arange(10)
    mask = data.copy()
    uncertainty = StdDevUncertainty(data.copy())
    wcs = data.copy()
    nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)

    nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
    assert_array_equal(data[3:8], nd2.data)
    assert_array_equal(mask[3:8], nd2.mask)
    assert_array_equal(wcs[3:8], nd2.wcs)
    assert_array_equal(uncertainty.array[3:8], nd2.uncertainty.array)
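
# Editor-added hedged sketch, not part of the original module: slicing goes
# through NDSlicingMixin.__getitem__, so the sliced object is assumed to keep
# the (sub)class of the container it was created from.
def test_slicing_preserves_class_sketch():
    nd = NDDataSliceable(np.arange(10))
    assert isinstance(nd[2:5], NDDataSliceable)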
89fc021581f6c13015170edf19615735cb3b38e6dbc102d9164377a63e82b10b
from ... import NDData, NDIOMixin, NDDataRef

# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataIO = NDDataRef


def test_simple_write_read(tmpdir):
    ndd = NDDataIO([1, 2, 3])
    assert hasattr(ndd, 'read')
    assert hasattr(ndd, 'write')
bbe134d7a256b6e0b86686ec953091169bc0194108b6c8580324191bb808048a
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_array_equal, assert_array_almost_equal from ...nduncertainty import (StdDevUncertainty, UnknownUncertainty, IncompatibleUncertaintiesException) from ... import NDDataRef from ...nddata import NDData from ....units import UnitsError, Quantity from .... import units as u # Alias NDDataAllMixins in case this will be renamed ... :-) NDDataArithmetic = NDDataRef class StdDevUncertaintyUncorrelated(StdDevUncertainty): @property def supports_correlated(self): return False # Test with Data covers: # scalars, 1D, 2D and 3D # broadcasting between them @pytest.mark.parametrize(('data1', 'data2'), [ (np.array(5), np.array(10)), (np.array(5), np.arange(10)), (np.array(5), np.arange(10).reshape(2, 5)), (np.arange(10), np.ones(10) * 2), (np.arange(10), np.ones((10, 10)) * 2), (np.arange(10).reshape(2, 5), np.ones((2, 5)) * 3), (np.arange(1000).reshape(20, 5, 10), np.ones((20, 5, 10)) * 3) ]) def test_arithmetics_data(data1, data2): nd1 = NDDataArithmetic(data1) nd2 = NDDataArithmetic(data2) # Addition nd3 = nd1.add(nd2) assert_array_equal(data1+data2, nd3.data) # Subtraction nd4 = nd1.subtract(nd2) assert_array_equal(data1-data2, nd4.data) # Multiplication nd5 = nd1.multiply(nd2) assert_array_equal(data1*data2, nd5.data) # Division nd6 = nd1.divide(nd2) assert_array_equal(data1/data2, nd6.data) for nd in [nd3, nd4, nd5, nd6]: # Check that broadcasting worked as expected if data1.ndim > data2.ndim: assert data1.shape == nd.data.shape else: assert data2.shape == nd.data.shape # Check all other attributes are not set assert nd.unit is None assert nd.uncertainty is None assert nd.mask is None assert len(nd.meta) == 0 assert nd.wcs is None # Invalid arithmetic operations for data covering: # not broadcastable data def test_arithmetics_data_invalid(): nd1 = NDDataArithmetic([1, 2, 3]) nd2 = NDDataArithmetic([1, 2]) with pytest.raises(ValueError): nd1.add(nd2) # Test with Data and unit and covers: # identical units (even dimensionless unscaled vs. 
no unit), # equivalent units (such as meter and kilometer) # equivalent composite units (such as m/s and km/h) @pytest.mark.parametrize(('data1', 'data2'), [ (np.array(5) * u.s, np.array(10) * u.s), (np.array(5) * u.s, np.arange(10) * u.h), (np.array(5) * u.s, np.arange(10).reshape(2, 5) * u.min), (np.arange(10) * u.m / u.s, np.ones(10) * 2 * u.km / u.s), (np.arange(10) * u.m / u.s, np.ones((10, 10)) * 2 * u.m / u.h), (np.arange(10).reshape(2, 5) * u.m / u.s, np.ones((2, 5)) * 3 * u.km / u.h), (np.arange(1000).reshape(20, 5, 10), np.ones((20, 5, 10)) * 3 * u.dimensionless_unscaled), (np.array(5), np.array(10) * u.s / u.h), ]) def test_arithmetics_data_unit_identical(data1, data2): nd1 = NDDataArithmetic(data1) nd2 = NDDataArithmetic(data2) # Addition nd3 = nd1.add(nd2) ref = data1 + data2 ref_unit, ref_data = ref.unit, ref.value assert_array_equal(ref_data, nd3.data) assert nd3.unit == ref_unit # Subtraction nd4 = nd1.subtract(nd2) ref = data1 - data2 ref_unit, ref_data = ref.unit, ref.value assert_array_equal(ref_data, nd4.data) assert nd4.unit == ref_unit # Multiplication nd5 = nd1.multiply(nd2) ref = data1 * data2 ref_unit, ref_data = ref.unit, ref.value assert_array_equal(ref_data, nd5.data) assert nd5.unit == ref_unit # Division nd6 = nd1.divide(nd2) ref = data1 / data2 ref_unit, ref_data = ref.unit, ref.value assert_array_equal(ref_data, nd6.data) assert nd6.unit == ref_unit for nd in [nd3, nd4, nd5, nd6]: # Check that broadcasting worked as expected if data1.ndim > data2.ndim: assert data1.shape == nd.data.shape else: assert data2.shape == nd.data.shape # Check all other attributes are not set assert nd.uncertainty is None assert nd.mask is None assert len(nd.meta) == 0 assert nd.wcs is None # Test with Data and unit and covers: # not identical not convertible units # one with unit (which is not dimensionless) and one without @pytest.mark.parametrize(('data1', 'data2'), [ (np.array(5) * u.s, np.array(10) * u.m), (np.array(5) * u.Mpc, np.array(10) * u.km / u.s), (np.array(5) * u.Mpc, np.array(10)), (np.array(5), np.array(10) * u.s), ]) def test_arithmetics_data_unit_not_identical(data1, data2): nd1 = NDDataArithmetic(data1) nd2 = NDDataArithmetic(data2) # Addition should not be possible with pytest.raises(UnitsError): nd1.add(nd2) # Subtraction should not be possible with pytest.raises(UnitsError): nd1.subtract(nd2) # Multiplication is possible nd3 = nd1.multiply(nd2) ref = data1 * data2 ref_unit, ref_data = ref.unit, ref.value assert_array_equal(ref_data, nd3.data) assert nd3.unit == ref_unit # Division is possible nd4 = nd1.divide(nd2) ref = data1 / data2 ref_unit, ref_data = ref.unit, ref.value assert_array_equal(ref_data, nd4.data) assert nd4.unit == ref_unit for nd in [nd3, nd4]: # Check all other attributes are not set assert nd.uncertainty is None assert nd.mask is None assert len(nd.meta) == 0 assert nd.wcs is None # Tests with wcs (not very sensible because there is no operation between them # covering: # both set and identical/not identical # one set # None set @pytest.mark.parametrize(('wcs1', 'wcs2'), [ (None, None), (None, 5), (5, None), (5, 5), (7, 5), ]) def test_arithmetics_data_wcs(wcs1, wcs2): nd1 = NDDataArithmetic(1, wcs=wcs1) nd2 = NDDataArithmetic(1, wcs=wcs2) if wcs1 is None and wcs2 is None: ref_wcs = None elif wcs1 is None: ref_wcs = wcs2 elif wcs2 is None: ref_wcs = wcs1 else: ref_wcs = wcs1 # Addition nd3 = nd1.add(nd2) assert ref_wcs == nd3.wcs # Subtraction nd4 = nd1.subtract(nd2) assert ref_wcs == nd3.wcs # Multiplication nd5 = nd1.multiply(nd2) assert 
ref_wcs == nd3.wcs # Division nd6 = nd1.divide(nd2) assert ref_wcs == nd3.wcs for nd in [nd3, nd4, nd5, nd6]: # Check all other attributes are not set assert nd.unit is None assert nd.uncertainty is None assert len(nd.meta) == 0 assert nd.mask is None # Masks are completely separated in the NDArithmetics from the data so we need # no correlated tests but covering: # masks 1D, 2D and mixed cases with broadcasting @pytest.mark.parametrize(('mask1', 'mask2'), [ (None, None), (None, False), (True, None), (False, False), (True, False), (False, True), (True, True), (np.array(False), np.array(True)), (np.array(False), np.array([0, 1, 0, 1, 1], dtype=np.bool_)), (np.array(True), np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)), (np.array([0, 1, 0, 1, 1], dtype=np.bool_), np.array([1, 1, 0, 0, 1], dtype=np.bool_)), (np.array([0, 1, 0, 1, 1], dtype=np.bool_), np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_)), (np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_), np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)), ]) def test_arithmetics_data_masks(mask1, mask2): nd1 = NDDataArithmetic(1, mask=mask1) nd2 = NDDataArithmetic(1, mask=mask2) if mask1 is None and mask2 is None: ref_mask = None elif mask1 is None: ref_mask = mask2 elif mask2 is None: ref_mask = mask1 else: ref_mask = mask1 | mask2 # Addition nd3 = nd1.add(nd2) assert_array_equal(ref_mask, nd3.mask) # Subtraction nd4 = nd1.subtract(nd2) assert_array_equal(ref_mask, nd4.mask) # Multiplication nd5 = nd1.multiply(nd2) assert_array_equal(ref_mask, nd5.mask) # Division nd6 = nd1.divide(nd2) assert_array_equal(ref_mask, nd6.mask) for nd in [nd3, nd4, nd5, nd6]: # Check all other attributes are not set assert nd.unit is None assert nd.uncertainty is None assert len(nd.meta) == 0 assert nd.wcs is None # One additional case which can not be easily incorporated in the test above # what happens if the masks are numpy ndarrays are not broadcastable def test_arithmetics_data_masks_invalid(): nd1 = NDDataArithmetic(1, mask=np.array([1, 0], dtype=np.bool_)) nd2 = NDDataArithmetic(1, mask=np.array([1, 0, 1], dtype=np.bool_)) with pytest.raises(ValueError): nd1.add(nd2) with pytest.raises(ValueError): nd1.multiply(nd2) with pytest.raises(ValueError): nd1.subtract(nd2) with pytest.raises(ValueError): nd1.divide(nd2) # Covering: # both have uncertainties (data and uncertainty without unit) # tested against manually determined resulting uncertainties to verify the # implemented formulas # this test only works as long as data1 and data2 do not contain any 0 def test_arithmetics_stddevuncertainty_basic(): nd1 = NDDataArithmetic([1, 2, 3], uncertainty=StdDevUncertainty([1, 1, 3])) nd2 = NDDataArithmetic([2, 2, 2], uncertainty=StdDevUncertainty([2, 2, 2])) nd3 = nd1.add(nd2) nd4 = nd2.add(nd1) # Inverse operation should result in the same uncertainty assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array) # Compare it to the theoretical uncertainty ref_uncertainty = np.sqrt(np.array([1, 1, 3])**2 + np.array([2, 2, 2])**2) assert_array_equal(nd3.uncertainty.array, ref_uncertainty) nd3 = nd1.subtract(nd2) nd4 = nd2.subtract(nd1) # Inverse operation should result in the same uncertainty assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array) # Compare it to the theoretical uncertainty (same as for add) assert_array_equal(nd3.uncertainty.array, ref_uncertainty) # Multiplication and Division only work with almost equal array comparisons # since the formula implemented and the formula used as reference are # 
slightly different. nd3 = nd1.multiply(nd2) nd4 = nd2.multiply(nd1) # Inverse operation should result in the same uncertainty assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array) # Compare it to the theoretical uncertainty ref_uncertainty = np.abs(np.array([2, 4, 6])) * np.sqrt( (np.array([1, 1, 3]) / np.array([1, 2, 3]))**2 + (np.array([2, 2, 2]) / np.array([2, 2, 2]))**2) assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty) nd3 = nd1.divide(nd2) nd4 = nd2.divide(nd1) # Inverse operation gives a different uncertainty! # Compare it to the theoretical uncertainty ref_uncertainty_1 = np.abs(np.array([1/2, 2/2, 3/2])) * np.sqrt( (np.array([1, 1, 3]) / np.array([1, 2, 3]))**2 + (np.array([2, 2, 2]) / np.array([2, 2, 2]))**2) assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1) ref_uncertainty_2 = np.abs(np.array([2, 1, 2/3])) * np.sqrt( (np.array([1, 1, 3]) / np.array([1, 2, 3]))**2 + (np.array([2, 2, 2]) / np.array([2, 2, 2]))**2) assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2) # Tests for correlation, covering # correlation between -1 and 1 with correlation term being positive / negative # also with one data being once positive and once completely negative # The point of this test is to compare the used formula to the theoretical one. # TODO: Maybe covering units too but I think that should work because of # the next tests. Also this may be reduced somehow. @pytest.mark.parametrize(('cor', 'uncert1', 'data2'), [ (-1, [1, 1, 3], [2, 2, 7]), (-0.5, [1, 1, 3], [2, 2, 7]), (-0.25, [1, 1, 3], [2, 2, 7]), (0, [1, 1, 3], [2, 2, 7]), (0.25, [1, 1, 3], [2, 2, 7]), (0.5, [1, 1, 3], [2, 2, 7]), (1, [1, 1, 3], [2, 2, 7]), (-1, [-1, -1, -3], [2, 2, 7]), (-0.5, [-1, -1, -3], [2, 2, 7]), (-0.25, [-1, -1, -3], [2, 2, 7]), (0, [-1, -1, -3], [2, 2, 7]), (0.25, [-1, -1, -3], [2, 2, 7]), (0.5, [-1, -1, -3], [2, 2, 7]), (1, [-1, -1, -3], [2, 2, 7]), (-1, [1, 1, 3], [-2, -3, -2]), (-0.5, [1, 1, 3], [-2, -3, -2]), (-0.25, [1, 1, 3], [-2, -3, -2]), (0, [1, 1, 3], [-2, -3, -2]), (0.25, [1, 1, 3], [-2, -3, -2]), (0.5, [1, 1, 3], [-2, -3, -2]), (1, [1, 1, 3], [-2, -3, -2]), (-1, [-1, -1, -3], [-2, -3, -2]), (-0.5, [-1, -1, -3], [-2, -3, -2]), (-0.25, [-1, -1, -3], [-2, -3, -2]), (0, [-1, -1, -3], [-2, -3, -2]), (0.25, [-1, -1, -3], [-2, -3, -2]), (0.5, [-1, -1, -3], [-2, -3, -2]), (1, [-1, -1, -3], [-2, -3, -2]), ]) def test_arithmetics_stddevuncertainty_basic_with_correlation( cor, uncert1, data2): data1 = np.array([1, 2, 3]) data2 = np.array(data2) uncert1 = np.array(uncert1) uncert2 = np.array([2, 2, 2]) nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1)) nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2)) nd3 = nd1.add(nd2, uncertainty_correlation=cor) nd4 = nd2.add(nd1, uncertainty_correlation=cor) # Inverse operation should result in the same uncertainty assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array) # Compare it to the theoretical uncertainty ref_uncertainty = np.sqrt(uncert1**2 + uncert2**2 + 2 * cor * uncert1 * uncert2) assert_array_equal(nd3.uncertainty.array, ref_uncertainty) nd3 = nd1.subtract(nd2, uncertainty_correlation=cor) nd4 = nd2.subtract(nd1, uncertainty_correlation=cor) # Inverse operation should result in the same uncertainty assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array) # Compare it to the theoretical uncertainty ref_uncertainty = np.sqrt(uncert1**2 + uncert2**2 - 2 * cor * uncert1 * uncert2) assert_array_equal(nd3.uncertainty.array, ref_uncertainty) # 
Multiplication and Division only work with almost equal array comparisons # since the formula implemented and the formula used as reference are # slightly different. nd3 = nd1.multiply(nd2, uncertainty_correlation=cor) nd4 = nd2.multiply(nd1, uncertainty_correlation=cor) # Inverse operation should result in the same uncertainty assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array) # Compare it to the theoretical uncertainty ref_uncertainty = (np.abs(data1 * data2)) * np.sqrt( (uncert1 / data1)**2 + (uncert2 / data2)**2 + (2 * cor * uncert1 * uncert2 / (data1 * data2))) assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty) nd3 = nd1.divide(nd2, uncertainty_correlation=cor) nd4 = nd2.divide(nd1, uncertainty_correlation=cor) # Inverse operation gives a different uncertainty! # Compare it to the theoretical uncertainty ref_uncertainty_1 = (np.abs(data1 / data2)) * np.sqrt( (uncert1 / data1)**2 + (uncert2 / data2)**2 - (2 * cor * uncert1 * uncert2 / (data1 * data2))) assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1) ref_uncertainty_2 = (np.abs(data2 / data1)) * np.sqrt( (uncert1 / data1)**2 + (uncert2 / data2)**2 - (2 * cor * uncert1 * uncert2 / (data1 * data2))) assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2) # Covering: # just an example that a np.ndarray works as correlation, no checks for # the right result since these were basically done in the function above. def test_arithmetics_stddevuncertainty_basic_with_correlation_array(): data1 = np.array([1, 2, 3]) data2 = np.array([1, 1, 1]) uncert1 = np.array([1, 1, 1]) uncert2 = np.array([2, 2, 2]) cor = np.array([0, 0.25, 0]) nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1)) nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2)) nd1.add(nd2, uncertainty_correlation=cor) # Covering: # That propagate throws an exception when correlation is given but the # uncertainty does not support correlation. def test_arithmetics_with_correlation_unsupported(): data1 = np.array([1, 2, 3]) data2 = np.array([1, 1, 1]) uncert1 = np.array([1, 1, 1]) uncert2 = np.array([2, 2, 2]) cor = 3 nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertaintyUncorrelated(uncert1)) nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertaintyUncorrelated(uncert2)) with pytest.raises(ValueError): nd1.add(nd2, uncertainty_correlation=cor) # Covering: # only one has an uncertainty (data and uncertainty without unit) # tested against the case where the other one has zero uncertainty. (this case # must be correct because we tested it in the last case) # Also verify that if the result of the data has negative values the resulting # uncertainty has no negative values. 
def test_arithmetics_stddevuncertainty_one_missing(): nd1 = NDDataArithmetic([1, -2, 3]) nd1_ref = NDDataArithmetic([1, -2, 3], uncertainty=StdDevUncertainty([0, 0, 0])) nd2 = NDDataArithmetic([2, 2, -2], uncertainty=StdDevUncertainty([2, 2, 2])) # Addition nd3 = nd1.add(nd2) nd3_ref = nd1_ref.add(nd2) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) nd3 = nd2.add(nd1) nd3_ref = nd2.add(nd1_ref) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) # Subtraction nd3 = nd1.subtract(nd2) nd3_ref = nd1_ref.subtract(nd2) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) nd3 = nd2.subtract(nd1) nd3_ref = nd2.subtract(nd1_ref) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) # Multiplication nd3 = nd1.multiply(nd2) nd3_ref = nd1_ref.multiply(nd2) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) nd3 = nd2.multiply(nd1) nd3_ref = nd2.multiply(nd1_ref) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) # Division nd3 = nd1.divide(nd2) nd3_ref = nd1_ref.divide(nd2) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) nd3 = nd2.divide(nd1) nd3_ref = nd2.divide(nd1_ref) assert_array_equal(nd3.uncertainty.array, nd3_ref.uncertainty.array) assert_array_equal(np.abs(nd3.uncertainty.array), nd3.uncertainty.array) # Covering: # data with unit and uncertainty with unit (but equivalent units) # compared against correctly scaled NDDatas @pytest.mark.parametrize(('uncert1', 'uncert2'), [ (np.array([1, 2, 3]) * u.m, None), (np.array([1, 2, 3]) * u.cm, None), (None, np.array([1, 2, 3]) * u.m), (None, np.array([1, 2, 3]) * u.cm), (np.array([1, 2, 3]), np.array([2, 3, 4])), (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])), (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m, (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m, (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])), (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm, (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm, (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm, ]) def test_arithmetics_stddevuncertainty_with_units(uncert1, uncert2): # Data has same units data1 = np.array([1, 2, 3]) * u.m data2 = np.array([-4, 7, 0]) * u.m if uncert1 is not None: uncert1 = StdDevUncertainty(uncert1) if isinstance(uncert1, Quantity): uncert1_ref = uncert1.to_value(data1.unit) else: uncert1_ref = uncert1 uncert_ref1 = StdDevUncertainty(uncert1_ref, copy=True) else: uncert1 = None uncert_ref1 = None if uncert2 is not None: uncert2 = StdDevUncertainty(uncert2) if isinstance(uncert2, Quantity): uncert2_ref = uncert2.to_value(data2.unit) else: uncert2_ref = uncert2 uncert_ref2 = StdDevUncertainty(uncert2_ref, copy=True) else: uncert2 = None uncert_ref2 = None nd1 = NDDataArithmetic(data1, uncertainty=uncert1) nd2 = NDDataArithmetic(data2, uncertainty=uncert2) nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1) nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2) # Let's start the tests # Addition nd3 = nd1.add(nd2) 
nd3_ref = nd1_ref.add(nd2_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) nd3 = nd2.add(nd1) nd3_ref = nd2_ref.add(nd1_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) # Subtraction nd3 = nd1.subtract(nd2) nd3_ref = nd1_ref.subtract(nd2_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) nd3 = nd2.subtract(nd1) nd3_ref = nd2_ref.subtract(nd1_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) # Multiplication nd3 = nd1.multiply(nd2) nd3_ref = nd1_ref.multiply(nd2_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) nd3 = nd2.multiply(nd1) nd3_ref = nd2_ref.multiply(nd1_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) # Division nd3 = nd1.divide(nd2) nd3_ref = nd1_ref.divide(nd2_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) nd3 = nd2.divide(nd1) nd3_ref = nd2_ref.divide(nd1_ref) assert nd3.unit == nd3_ref.unit assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit assert_array_equal(nd3.uncertainty.array, nd3.uncertainty.array) # Test abbreviation and long name for taking the first found meta, mask, wcs @pytest.mark.parametrize(('use_abbreviation'), ['ff', 'first_found']) def test_arithmetics_handle_switches(use_abbreviation): meta1 = {'a': 1} meta2 = {'b': 2} mask1 = True mask2 = False uncertainty1 = StdDevUncertainty([1, 2, 3]) uncertainty2 = StdDevUncertainty([1, 2, 3]) wcs1 = 5 wcs2 = 100 data1 = [1, 1, 1] data2 = [1, 1, 1] nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1) nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2) nd3 = NDDataArithmetic(data1) # Both have the attributes but option None is chosen nd_ = nd1.add(nd2, propagate_uncertainties=None, handle_meta=None, handle_mask=None, compare_wcs=None) assert nd_.wcs is None assert len(nd_.meta) == 0 assert nd_.mask is None assert nd_.uncertainty is None # Only second has attributes and False is chosen nd_ = nd3.add(nd2, propagate_uncertainties=False, handle_meta=use_abbreviation, handle_mask=use_abbreviation, compare_wcs=use_abbreviation) assert nd_.wcs == wcs2 assert nd_.meta == meta2 assert nd_.mask == mask2 assert_array_equal(nd_.uncertainty.array, uncertainty2.array) # Only first has attributes and False is chosen nd_ = nd1.add(nd3, propagate_uncertainties=False, handle_meta=use_abbreviation, handle_mask=use_abbreviation, compare_wcs=use_abbreviation) assert nd_.wcs == wcs1 assert nd_.meta == meta1 assert nd_.mask == mask1 assert_array_equal(nd_.uncertainty.array, uncertainty1.array) def test_arithmetics_meta_func(): def meta_fun_func(meta1, meta2, take='first'): if take == 'first': return meta1 else: return meta2 meta1 = {'a': 1} meta2 = {'a': 3, 'b': 2} mask1 = True mask2 = False uncertainty1 = StdDevUncertainty([1, 2, 3]) uncertainty2 = StdDevUncertainty([1, 2, 3]) wcs1 = 5 wcs2 = 100 data1 = [1, 1, 1] 
data2 = [1, 1, 1] nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1) nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2) nd3 = nd1.add(nd2, handle_meta=meta_fun_func) assert nd3.meta['a'] == 1 assert 'b' not in nd3.meta nd4 = nd1.add(nd2, handle_meta=meta_fun_func, meta_take='second') assert nd4.meta['a'] == 3 assert nd4.meta['b'] == 2 with pytest.raises(KeyError): nd1.add(nd2, handle_meta=meta_fun_func, take='second') def test_arithmetics_wcs_func(): def wcs_comp_func(wcs1, wcs2, tolerance=0.1): if abs(wcs1 - wcs2) <= tolerance: return True else: return False meta1 = {'a': 1} meta2 = {'a': 3, 'b': 2} mask1 = True mask2 = False uncertainty1 = StdDevUncertainty([1, 2, 3]) uncertainty2 = StdDevUncertainty([1, 2, 3]) wcs1 = 99.99 wcs2 = 100 data1 = [1, 1, 1] data2 = [1, 1, 1] nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1) nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2) nd3 = nd1.add(nd2, compare_wcs=wcs_comp_func) assert nd3.wcs == 99.99 with pytest.raises(ValueError): nd1.add(nd2, compare_wcs=wcs_comp_func, wcs_tolerance=0.00001) with pytest.raises(KeyError): nd1.add(nd2, compare_wcs=wcs_comp_func, tolerance=1) def test_arithmetics_mask_func(): def mask_sad_func(mask1, mask2, fun=0): if fun > 0.5: return mask2 else: return mask1 meta1 = {'a': 1} meta2 = {'a': 3, 'b': 2} mask1 = [True, False, True] mask2 = [True, False, False] uncertainty1 = StdDevUncertainty([1, 2, 3]) uncertainty2 = StdDevUncertainty([1, 2, 3]) wcs1 = 99.99 wcs2 = 100 data1 = [1, 1, 1] data2 = [1, 1, 1] nd1 = NDDataArithmetic(data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1) nd2 = NDDataArithmetic(data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2) nd3 = nd1.add(nd2, handle_mask=mask_sad_func) assert_array_equal(nd3.mask, nd1.mask) nd4 = nd1.add(nd2, handle_mask=mask_sad_func, mask_fun=1) assert_array_equal(nd4.mask, nd2.mask) with pytest.raises(KeyError): nd1.add(nd2, handle_mask=mask_sad_func, fun=1) @pytest.mark.parametrize('meth', ['add', 'subtract', 'divide', 'multiply']) def test_two_argument_useage(meth): ndd1 = NDDataArithmetic(np.ones((3, 3))) ndd2 = NDDataArithmetic(np.ones((3, 3))) # Call add on the class (not the instance) and compare it with already # tested useage: ndd3 = getattr(NDDataArithmetic, meth)(ndd1, ndd2) ndd4 = getattr(ndd1, meth)(ndd2) np.testing.assert_array_equal(ndd3.data, ndd4.data) # And the same done on an unrelated instance... 
ndd3 = getattr(NDDataArithmetic(-100), meth)(ndd1, ndd2) ndd4 = getattr(ndd1, meth)(ndd2) np.testing.assert_array_equal(ndd3.data, ndd4.data) @pytest.mark.parametrize('meth', ['add', 'subtract', 'divide', 'multiply']) def test_two_argument_useage_non_nddata_first_arg(meth): data1 = 50 data2 = 100 # Call add on the class (not the instance) ndd3 = getattr(NDDataArithmetic, meth)(data1, data2) # Compare it with the instance-useage and two identical NDData-like # classes: ndd1 = NDDataArithmetic(data1) ndd2 = NDDataArithmetic(data2) ndd4 = getattr(ndd1, meth)(ndd2) np.testing.assert_array_equal(ndd3.data, ndd4.data) # and check it's also working when called on an instance ndd3 = getattr(NDDataArithmetic(-100), meth)(data1, data2) ndd4 = getattr(ndd1, meth)(ndd2) np.testing.assert_array_equal(ndd3.data, ndd4.data) def test_arithmetics_unknown_uncertainties(): # Not giving any uncertainty class means it is saved as UnknownUncertainty ndd1 = NDDataArithmetic(np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)))) ndd2 = NDDataArithmetic(np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3))*2)) # There is no way to propagate uncertainties: with pytest.raises(IncompatibleUncertaintiesException): ndd1.add(ndd2) # But it should be possible without propagation ndd3 = ndd1.add(ndd2, propagate_uncertainties=False) np.testing.assert_array_equal(ndd1.uncertainty.array, ndd3.uncertainty.array) ndd4 = ndd1.add(ndd2, propagate_uncertainties=None) assert ndd4.uncertainty is None
ce5045e6c76e8231e8de1b7d036db247429893677e59bdf6847558d68281b073
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import numpy as np

from .. import core as erfa
from ...tests.helper import catch_warnings


def test_erfa_wrapper():
    """
    Runs a set of tests that mostly make sure vectorization is
    working as expected
    """
    jd = np.linspace(2456855.5, 2456855.5+1.0/24.0/60.0, 60*2+1)
    ra = np.linspace(0.0, np.pi*2.0, 5)
    dec = np.linspace(-np.pi/2.0, np.pi/2.0, 4)

    aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                              jd, 0.0, 0.0, 0.0, np.pi/4.0,
                                              0.0, 0.0, 0.0, 1014.0, 0.0,
                                              0.0, 0.5)
    assert aob.shape == (121,)

    aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                              jd[0], 0.0, 0.0, 0.0,
                                              np.pi/4.0, 0.0, 0.0, 0.0,
                                              1014.0, 0.0, 0.0, 0.5)
    assert aob.shape == ()

    aob, zob, hob, dob, rob, eo = erfa.atco13(ra[:, None, None],
                                              dec[None, :, None],
                                              0.0, 0.0, 0.0, 0.0,
                                              jd[None, None, :],
                                              0.0, 0.0, 0.0, np.pi/4.0,
                                              0.0, 0.0, 0.0, 1014.0,
                                              0.0, 0.0, 0.5)
    assert aob.shape == (5, 4, 121)

    iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd, 0.0)
    assert iy.shape == (121,)
    assert ihmsf.shape == (121, 4)
    assert ihmsf.dtype == np.dtype('i4')

    iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd[0], 0.0)
    assert iy.shape == ()
    assert ihmsf.shape == (4,)
    assert ihmsf.dtype == np.dtype('i4')


def test_angle_ops():
    sign, idmsf = erfa.a2af(6, -np.pi)
    assert sign == b'-'
    assert (idmsf == [180, 0, 0, 0]).all()

    sign, ihmsf = erfa.a2tf(6, np.pi)
    assert sign == b'+'
    assert (ihmsf == [12, 0, 0, 0]).all()

    rad = erfa.af2a('-', 180, 0, 0.0)
    np.testing.assert_allclose(rad, -np.pi)

    rad = erfa.tf2a('+', 12, 0, 0.0)
    np.testing.assert_allclose(rad, np.pi)

    rad = erfa.anp(3.*np.pi)
    np.testing.assert_allclose(rad, np.pi)

    rad = erfa.anpm(3.*np.pi)
    np.testing.assert_allclose(rad, -np.pi)

    sign, ihmsf = erfa.d2tf(1, -1.5)
    assert sign == b'-'
    assert (ihmsf == [36, 0, 0, 0]).all()

    days = erfa.tf2d('+', 3, 0, 0.0)
    np.testing.assert_allclose(days, 0.125)


def test_spherical_cartesian():
    theta, phi = erfa.c2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
    np.testing.assert_allclose(theta, np.pi/2.0)
    np.testing.assert_allclose(phi, np.pi/4.0)

    theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
    np.testing.assert_allclose(theta, np.pi/2.0)
    np.testing.assert_allclose(phi, np.pi/4.0)
    np.testing.assert_allclose(r, 2.0)

    theta, phi, r, td, pd, rd = erfa.pv2s([[0.0, np.sqrt(2.0), np.sqrt(2.0)],
                                           [1.0, 0.0, 0.0]])
    np.testing.assert_allclose(theta, np.pi/2.0)
    np.testing.assert_allclose(phi, np.pi/4.0)
    np.testing.assert_allclose(r, 2.0)
    np.testing.assert_allclose(td, -np.sqrt(2.0)/2.0)
    np.testing.assert_allclose(pd, 0.0)
    np.testing.assert_allclose(rd, 0.0)

    c = erfa.s2c(np.pi/2.0, np.pi/4.0)
    np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0],
                               atol=1e-14)

    c = erfa.s2p(np.pi/2.0, np.pi/4.0, 1.0)
    np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0],
                               atol=1e-14)

    pv = erfa.s2pv(np.pi/2.0, np.pi/4.0, 2.0, np.sqrt(2.0)/2.0, 0.0, 0.0)
    np.testing.assert_allclose(pv, [[0.0, np.sqrt(2.0), np.sqrt(2.0)],
                                    [-1.0, 0.0, 0.0]],
                               atol=1e-14)


def test_errwarn_reporting():
    """
    Test that the ERFA error reporting mechanism works as it should
    """

    # no warning
    erfa.dat(1990, 1, 1, 0.5)

    # check warning is raised for a scalar
    with catch_warnings() as w:
        erfa.dat(100, 1, 1, 0.5)
        assert len(w) == 1
        assert w[0].category == erfa.ErfaWarning
        assert '1 of "dubious year (Note 1)"' in str(w[0].message)

    # and that the count is right for a vector.
with catch_warnings() as w: erfa.dat([100, 200, 1990], 1, 1, 0.5) assert len(w) == 1 assert w[0].category == erfa.ErfaWarning assert '2 of "dubious year (Note 1)"' in str(w[0].message) try: erfa.dat(1990, [1, 34, 2], [1, 1, 43], 0.5) except erfa.ErfaError as e: if '1 of "bad day (Note 3)", 1 of "bad month"' not in e.args[0]: assert False, 'Raised the correct type of error, but wrong message: ' + e.args[0] try: erfa.dat(200, [1, 34, 2], [1, 1, 43], 0.5) except erfa.ErfaError as e: if 'warning' in e.args[0]: assert False, 'Raised the correct type of error, but there were warnings mixed in: ' + e.args[0] def test_vector_inouts(): """ Tests that ERFA functions working with vectors are correctly consumed and spit out """ # values are from test_erfa.c t_ab function pnat = [-0.76321968546737951, -0.60869453983060384, -0.21676408580639883] v = [2.1044018893653786e-5, -8.9108923304429319e-5, -3.8633714797716569e-5] s = 0.99980921395708788 bm1 = 0.99999999506209258 expected = [-0.7631631094219556269, -0.6087553082505590832, -0.2167926269368471279] res = erfa.ab(pnat, v, s, bm1) assert res.shape == (3,) np.testing.assert_allclose(res, expected) res2 = erfa.ab([pnat]*4, v, s, bm1) assert res2.shape == (4, 3) np.testing.assert_allclose(res2, [expected]*4) # here we stride an array and also do it Fortran-order to make sure # it all still works correctly with non-contig arrays pnata = np.array(pnat) arrin = np.array([pnata, pnata/2, pnata/3, pnata/4, pnata/5]*4, order='F') res3 = erfa.ab(arrin[::5], v, s, bm1) assert res3.shape == (4, 3) np.testing.assert_allclose(res3, [expected]*4) def test_matrix_in(): jd1 = 2456165.5 jd2 = 0.401182685 pvmat = np.empty((2, 3)) pvmat[0][0] = -6241497.16 pvmat[0][1] = 401346.896 pvmat[0][2] = -1251136.04 pvmat[1][0] = -29.264597 pvmat[1][1] = -455.021831 pvmat[1][2] = 0.0266151194 astrom = erfa.apcs13(jd1, jd2, pvmat) assert astrom.shape == () # values from t_erfa_c np.testing.assert_allclose(astrom['pmt'], 12.65133794027378508) np.testing.assert_allclose(astrom['em'], 1.010428384373318379) np.testing.assert_allclose(astrom['eb'], [.9012691529023298391, -.4173999812023068781, -.1809906511146821008]) np.testing.assert_allclose(astrom['bpn'], np.eye(3)) # first make sure it *fails* if we mess with the input orders pvmatbad = np.roll(pvmat.ravel(), 1).reshape((2, 3)) astrombad = erfa.apcs13(jd1, jd2, pvmatbad) assert not np.allclose(astrombad['em'], 1.010428384373318379) pvmatarr = np.array([pvmat]*3) astrom2 = erfa.apcs13(jd1, jd2, pvmatarr) assert astrom2.shape == (3,) np.testing.assert_allclose(astrom2['em'], 1.010428384373318379) # try striding of the input array to make non-contiguous pvmatarr = np.array([pvmat]*9)[::3] astrom3 = erfa.apcs13(jd1, jd2, pvmatarr) assert astrom3.shape == (3,) np.testing.assert_allclose(astrom3['em'], 1.010428384373318379) # try fortran-order pvmatarr = np.array([pvmat]*3, order='F') astrom4 = erfa.apcs13(jd1, jd2, pvmatarr) assert astrom4.shape == (3,) np.testing.assert_allclose(astrom4['em'], 1.010428384373318379) def test_structs(): """ Checks producing and consuming of ERFA c structs """ am, eo = erfa.apci13(2456165.5, [0.401182685, 1]) assert am.shape == (2, ) assert am.dtype == erfa.dt_eraASTROM assert eo.shape == (2, ) # a few spotchecks from test_erfa.c np.testing.assert_allclose(am[0]['pmt'], 12.65133794027378508) np.testing.assert_allclose(am[0]['v'], [0.4289638897157027528e-4, 0.8115034002544663526e-4, 0.3517555122593144633e-4]) ri, di = erfa.atciqz(2.71, 0.174, am[0]) np.testing.assert_allclose(ri, 2.709994899247599271) 
np.testing.assert_allclose(di, 0.1728740720983623469)
0232375f8b24cae39732532d470602e64bdce7a0021f3ef958ba7f6b52b77b18
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from numpy.testing import assert_almost_equal from numpy.testing import assert_allclose from ...utils.data import get_pkg_data_contents, get_pkg_data_filename from ...time import Time from ... import units as u from ..wcs import WCS from ..utils import (proj_plane_pixel_scales, is_proj_plane_distorted, non_celestial_pixel_scales, wcs_to_celestial_frame, celestial_frame_to_wcs, skycoord_to_pixel, pixel_to_skycoord, custom_wcs_to_frame_mappings, custom_frame_to_wcs_mappings, add_stokes_axis_to_wcs) def test_wcs_dropping(): wcs = WCS(naxis=4) wcs.wcs.pc = np.zeros([4, 4]) np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5)) pc = wcs.wcs.pc # for later use below dropped = wcs.dropaxis(0) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4])) dropped = wcs.dropaxis(1) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4])) dropped = wcs.dropaxis(2) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4])) dropped = wcs.dropaxis(3) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3])) wcs = WCS(naxis=4) wcs.wcs.cd = pc dropped = wcs.dropaxis(0) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4])) dropped = wcs.dropaxis(1) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4])) dropped = wcs.dropaxis(2) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4])) dropped = wcs.dropaxis(3) assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3])) def test_wcs_swapping(): wcs = WCS(naxis=4) wcs.wcs.pc = np.zeros([4, 4]) np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5)) pc = wcs.wcs.pc # for later use below swapped = wcs.swapaxes(0, 1) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4])) swapped = wcs.swapaxes(0, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1])) swapped = wcs.swapaxes(2, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3])) wcs = WCS(naxis=4) wcs.wcs.cd = pc swapped = wcs.swapaxes(0, 1) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4])) swapped = wcs.swapaxes(0, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1])) swapped = wcs.swapaxes(2, 3) assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3])) @pytest.mark.parametrize('ndim', (2, 3)) def test_add_stokes(ndim): wcs = WCS(naxis=ndim) for ii in range(ndim + 1): outwcs = add_stokes_axis_to_wcs(wcs, ii) assert outwcs.wcs.naxis == ndim + 1 assert outwcs.wcs.ctype[ii] == 'STOKES' assert outwcs.wcs.cname[ii] == 'STOKES' def test_slice(): mywcs = WCS(naxis=2) mywcs.wcs.crval = [1, 1] mywcs.wcs.cdelt = [0.1, 0.1] mywcs.wcs.crpix = [1, 1] mywcs._naxis = [1000, 500] slice_wcs = mywcs.slice([slice(1, None), slice(0, None)]) assert np.all(slice_wcs.wcs.crpix == np.array([1, 0])) assert slice_wcs._naxis == [1000, 499] slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)]) assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25])) assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2])) assert slice_wcs._naxis == [250, 250] slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)]) assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2])) assert slice_wcs._naxis == [500, 250] # Non-integral values do not alter the naxis attribute slice_wcs = mywcs.slice([slice(50.), slice(20.)]) assert slice_wcs._naxis == [1000, 500] slice_wcs = mywcs.slice([slice(50.), slice(20)]) assert slice_wcs._naxis == [20, 500] 
slice_wcs = mywcs.slice([slice(50), slice(20.5)]) assert slice_wcs._naxis == [1000, 50] def test_slice_getitem(): mywcs = WCS(naxis=2) mywcs.wcs.crval = [1, 1] mywcs.wcs.cdelt = [0.1, 0.1] mywcs.wcs.crpix = [1, 1] slice_wcs = mywcs[1::2, 0::4] assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25])) assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2])) mywcs.wcs.crpix = [2, 2] slice_wcs = mywcs[1::2, 0::4] assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75])) assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2])) # Default: numpy order slice_wcs = mywcs[1::2] assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75])) assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2])) def test_slice_fitsorder(): mywcs = WCS(naxis=2) mywcs.wcs.crval = [1, 1] mywcs.wcs.cdelt = [0.1, 0.1] mywcs.wcs.crpix = [1, 1] slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False) assert np.all(slice_wcs.wcs.crpix == np.array([0, 1])) slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False) assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625])) assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4])) slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False) assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1])) assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1])) def test_invalid_slice(): mywcs = WCS(naxis=2) with pytest.raises(ValueError) as exc: mywcs[0] assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use " "wcs.sub or wcs.dropaxis if you want to remove " "axes.") with pytest.raises(ValueError) as exc: mywcs[0, ::2] assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use " "wcs.sub or wcs.dropaxis if you want to remove " "axes.") def test_axis_names(): mywcs = WCS(naxis=4) mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES'] assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES'] mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES'] assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES'] def test_celestial(): mywcs = WCS(naxis=4) mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES'] cel = mywcs.celestial assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN') assert cel.axis_type_names == ['RA', 'DEC'] def test_wcs_to_celestial_frame(): # Import astropy.coordinates here to avoid circular imports from ...coordinates.builtin_frames import ICRS, FK5, FK4, Galactic mywcs = WCS(naxis=2) with pytest.raises(ValueError) as exc: assert wcs_to_celestial_frame(mywcs) is None assert exc.value.args[0] == "Could not determine celestial frame corresponding to the specified WCS object" mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET'] with pytest.raises(ValueError): assert wcs_to_celestial_frame(mywcs) is None mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, ICRS) mywcs.wcs.equinox = 1987. 
frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, FK5) assert frame.equinox == Time(1987., format='jyear') mywcs.wcs.equinox = 1982 frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, FK4) assert frame.equinox == Time(1982., format='byear') mywcs.wcs.equinox = np.nan mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN'] frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, Galactic) mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] mywcs.wcs.radesys = 'ICRS' for equinox in [np.nan, 1987, 1982]: mywcs.wcs.equinox = equinox frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, ICRS) # Flipped order mywcs = WCS(naxis=2) mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN'] frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, ICRS) # More than two dimensions mywcs = WCS(naxis=3) mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN'] frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, ICRS) def test_wcs_to_celestial_frame_extend(): mywcs = WCS(naxis=2) mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET'] with pytest.raises(ValueError): wcs_to_celestial_frame(mywcs) class OffsetFrame: pass def identify_offset(wcs): if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'): return OffsetFrame() with custom_wcs_to_frame_mappings(identify_offset): frame = wcs_to_celestial_frame(mywcs) assert isinstance(frame, OffsetFrame) # Check that things are back to normal after the context manager with pytest.raises(ValueError): wcs_to_celestial_frame(mywcs) def test_celestial_frame_to_wcs(): # Import astropy.coordinates here to avoid circular imports from ...coordinates import ICRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame class FakeFrame(BaseCoordinateFrame): pass frame = FakeFrame() with pytest.raises(ValueError) as exc: celestial_frame_to_wcs(frame) assert exc.value.args[0] == ("Could not determine WCS corresponding to " "the specified coordinate frame.") frame = ICRS() mywcs = celestial_frame_to_wcs(frame) mywcs.wcs.set() assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN') assert mywcs.wcs.radesys == 'ICRS' assert np.isnan(mywcs.wcs.equinox) assert mywcs.wcs.lonpole == 180 assert mywcs.wcs.latpole == 0 frame = FK5(equinox='J1987') mywcs = celestial_frame_to_wcs(frame) assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN') assert mywcs.wcs.radesys == 'FK5' assert mywcs.wcs.equinox == 1987. frame = FK4(equinox='B1982') mywcs = celestial_frame_to_wcs(frame) assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN') assert mywcs.wcs.radesys == 'FK4' assert mywcs.wcs.equinox == 1982. frame = FK4NoETerms(equinox='B1982') mywcs = celestial_frame_to_wcs(frame) assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN') assert mywcs.wcs.radesys == 'FK4-NO-E' assert mywcs.wcs.equinox == 1982. 
frame = Galactic() mywcs = celestial_frame_to_wcs(frame) assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN') assert mywcs.wcs.radesys == '' assert np.isnan(mywcs.wcs.equinox) frame = Galactic() mywcs = celestial_frame_to_wcs(frame, projection='CAR') assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR') assert mywcs.wcs.radesys == '' assert np.isnan(mywcs.wcs.equinox) frame = Galactic() mywcs = celestial_frame_to_wcs(frame, projection='CAR') mywcs.wcs.crval = [100, -30] mywcs.wcs.set() assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60)) def test_celestial_frame_to_wcs_extend(): class OffsetFrame: pass frame = OffsetFrame() with pytest.raises(ValueError): celestial_frame_to_wcs(frame) def identify_offset(frame, projection=None): if isinstance(frame, OffsetFrame): wcs = WCS(naxis=2) wcs.wcs.ctype = ['XOFFSET', 'YOFFSET'] return wcs with custom_frame_to_wcs_mappings(identify_offset): mywcs = celestial_frame_to_wcs(frame) assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET') # Check that things are back to normal after the context manager with pytest.raises(ValueError): celestial_frame_to_wcs(frame) def test_pixscale_nodrop(): mywcs = WCS(naxis=2) mywcs.wcs.cdelt = [0.1, 0.2] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2)) mywcs.wcs.cdelt = [-0.1, 0.2] assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2)) def test_pixscale_withdrop(): mywcs = WCS(naxis=3) mywcs.wcs.cdelt = [0.1, 0.2, 1] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT'] assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2)) mywcs.wcs.cdelt = [-0.1, 0.2, 1] assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2)) def test_pixscale_cd(): mywcs = WCS(naxis=2) mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2)) @pytest.mark.parametrize('angle', (30, 45, 60, 75)) def test_pixscale_cd_rotated(angle): mywcs = WCS(naxis=2) rho = np.radians(angle) scale = 0.1 mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)], [scale * np.sin(rho), scale * np.cos(rho)]] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1)) @pytest.mark.parametrize('angle', (30, 45, 60, 75)) def test_pixscale_pc_rotated(angle): mywcs = WCS(naxis=2) rho = np.radians(angle) scale = 0.1 mywcs.wcs.cdelt = [-scale, scale] mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)], [np.sin(rho), np.cos(rho)]] mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1)) @pytest.mark.parametrize(('cdelt', 'pc', 'pccd'), (([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])), ([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])), ([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3])))) def test_pixel_scale_matrix(cdelt, pc, pccd): mywcs = WCS(naxis=(len(cdelt))) mywcs.wcs.cdelt = cdelt mywcs.wcs.pc = pc assert_almost_equal(mywcs.pixel_scale_matrix, pccd) @pytest.mark.parametrize(('ctype', 'cel'), ((['RA---TAN', 'DEC--TAN'], True), (['RA---TAN', 'DEC--TAN', 'FREQ'], False), (['RA---TAN', 'FREQ'], False),)) def test_is_celestial(ctype, cel): mywcs = WCS(naxis=len(ctype)) mywcs.wcs.ctype = ctype assert mywcs.is_celestial == cel @pytest.mark.parametrize(('ctype', 'cel'), ((['RA---TAN', 'DEC--TAN'], True), (['RA---TAN', 'DEC--TAN', 'FREQ'], True), (['RA---TAN', 'FREQ'], False),)) def test_has_celestial(ctype, cel): mywcs = WCS(naxis=len(ctype)) mywcs.wcs.ctype = ctype 
assert mywcs.has_celestial == cel @pytest.mark.parametrize(('cdelt', 'pc', 'cd'), ((np.array([0.1, 0.2]), np.eye(2), np.eye(2)), (np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2)), (np.array([0.1, 0.2]), np.eye(2), None), (np.array([0.1, 0.2]), None, np.eye(2)), )) def test_noncelestial_scale(cdelt, pc, cd): mywcs = WCS(naxis=2) if cd is not None: mywcs.wcs.cd = cd if pc is not None: mywcs.wcs.pc = pc mywcs.wcs.cdelt = cdelt mywcs.wcs.ctype = ['RA---TAN', 'FREQ'] ps = non_celestial_pixel_scales(mywcs) assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2])) @pytest.mark.parametrize('mode', ['all', 'wcs']) def test_skycoord_to_pixel(mode): # Import astropy.coordinates here to avoid circular imports from ...coordinates import SkyCoord header = get_pkg_data_contents('maps/1904-66_TAN.hdr', encoding='binary') wcs = WCS(header) ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs') xp, yp = skycoord_to_pixel(ref, wcs, mode=mode) # WCS is in FK5 so we need to transform back to ICRS new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs') assert_allclose(new.ra.degree, ref.ra.degree) assert_allclose(new.dec.degree, ref.dec.degree) # Make sure you can specify a different class using ``cls`` keyword class SkyCoord2(SkyCoord): pass new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode, cls=SkyCoord2).transform_to('icrs') assert new2.__class__ is SkyCoord2 assert_allclose(new2.ra.degree, ref.ra.degree) assert_allclose(new2.dec.degree, ref.dec.degree) def test_is_proj_plane_distorted(): # non-orthogonal CD: wcs = WCS(naxis=2) wcs.wcs.cd = [[-0.1, 0], [0, 0.2]] wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] assert(is_proj_plane_distorted(wcs)) # almost orthogonal CD: wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]] assert(not is_proj_plane_distorted(wcs)) # real case: header = get_pkg_data_filename('data/sip.fits') wcs = WCS(header) assert(is_proj_plane_distorted(wcs)) @pytest.mark.parametrize('mode', ['all', 'wcs']) def test_skycoord_to_pixel_distortions(mode): # Import astropy.coordinates here to avoid circular imports from ...coordinates import SkyCoord header = get_pkg_data_filename('data/sip.fits') wcs = WCS(header) ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs') xp, yp = skycoord_to_pixel(ref, wcs, mode=mode) # WCS is in FK5 so we need to transform back to ICRS new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs') assert_allclose(new.ra.degree, ref.ra.degree) assert_allclose(new.dec.degree, ref.dec.degree)
f6e9282f9a8fd84da014331b0786fc2959e2489e6be6c0e5c16f4048566401fa
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import pytest import numpy as np from ...utils.data import get_pkg_data_filenames, get_pkg_data_contents from ...utils.misc import NumpyRNGContext from ... import wcs # hdr_map_file_list = list(get_pkg_data_filenames("maps", pattern="*.hdr")) # use the base name of the file, because everything we yield # will show up in the test name in the pandokia report hdr_map_file_list = [os.path.basename(fname) for fname in get_pkg_data_filenames("maps", pattern="*.hdr")] # Checking the number of files before reading them in. # OLD COMMENTS: # AFTER we tested with every file that we found, check to see that we # actually have the list we expect. If N=0, we will not have performed # any tests at all. If N < n_data_files, we are missing some files, # so we will have skipped some tests. Without this check, both cases # happen silently! def test_read_map_files(): # how many map files we expect to see n_map_files = 28 assert len(hdr_map_file_list) == n_map_files, ( "test_read_map_files has wrong number data files: found {}, expected " " {}".format(len(hdr_map_file_list), n_map_files)) @pytest.mark.parametrize("filename", hdr_map_file_list) def test_map(filename): header = get_pkg_data_contents(os.path.join("maps", filename)) wcsobj = wcs.WCS(header) with NumpyRNGContext(123456789): x = np.random.rand(2 ** 12, wcsobj.wcs.naxis) world = wcsobj.wcs_pix2world(x, 1) pix = wcsobj.wcs_world2pix(x, 1) hdr_spec_file_list = [os.path.basename(fname) for fname in get_pkg_data_filenames("spectra", pattern="*.hdr")] def test_read_spec_files(): # how many spec files expected n_spec_files = 6 assert len(hdr_spec_file_list) == n_spec_files, ( "test_spectra has wrong number data files: found {}, expected " " {}".format(len(hdr_spec_file_list), n_spec_files)) # b.t.w. If this assert happens, py.test reports one more test # than it would have otherwise. @pytest.mark.parametrize("filename", hdr_spec_file_list) def test_spectrum(filename): header = get_pkg_data_contents(os.path.join("spectra", filename)) wcsobj = wcs.WCS(header) with NumpyRNGContext(123456789): x = np.random.rand(2 ** 16, wcsobj.wcs.naxis) world = wcsobj.wcs_pix2world(x, 1) pix = wcsobj.wcs_world2pix(x, 1)
105af9aff83e53dcd11d7949f9214fa812cae505c8c1a4265ecdd1c0c2b51b51
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import os import warnings from datetime import datetime import pytest import numpy as np from numpy.testing import ( assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal) from ...tests.helper import raises, catch_warnings from ... import wcs from .. import _wcs from ...utils.data import ( get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename) from ...utils.misc import NumpyRNGContext from ...io import fits class TestMaps: def setup(self): # get the list of the hdr files that we want to test self._file_list = list(get_pkg_data_filenames("maps", pattern="*.hdr")) def test_consistency(self): # Check to see that we actually have the list we expect, so that we # do not get in a situation where the list is empty or incomplete and # the tests still seem to pass correctly. # how many do we expect to see? n_data_files = 28 assert len(self._file_list) == n_data_files, ( "test_spectra has wrong number data files: found {}, expected " " {}".format(len(self._file_list), n_data_files)) def test_maps(self): for filename in self._file_list: # use the base name of the file, so we get more useful messages # for failing tests. filename = os.path.basename(filename) # Now find the associated file in the installed wcs test directory. header = get_pkg_data_contents( os.path.join("maps", filename), encoding='binary') # finally run the test. wcsobj = wcs.WCS(header) world = wcsobj.wcs_pix2world([[97, 97]], 1) assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1) pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1) assert_array_almost_equal(pix, [[97, 97]], decimal=0) class TestSpectra: def setup(self): self._file_list = list(get_pkg_data_filenames("spectra", pattern="*.hdr")) def test_consistency(self): # Check to see that we actually have the list we expect, so that we # do not get in a situation where the list is empty or incomplete and # the tests still seem to pass correctly. # how many do we expect to see? n_data_files = 6 assert len(self._file_list) == n_data_files, ( "test_spectra has wrong number data files: found {}, expected " " {}".format(len(self._file_list), n_data_files)) def test_spectra(self): for filename in self._file_list: # use the base name of the file, so we get more useful messages # for failing tests. filename = os.path.basename(filename) # Now find the associated file in the installed wcs test directory. header = get_pkg_data_contents( os.path.join("spectra", filename), encoding='binary') # finally run the test. 
all_wcs = wcs.find_all_wcs(header) assert len(all_wcs) == 9 def test_fixes(): """ From github issue #36 """ def run(): header = get_pkg_data_contents( 'data/nonstandard_units.hdr', encoding='binary') try: w = wcs.WCS(header, translate_units='dhs') except wcs.InvalidTransformError: pass else: assert False, "Expected InvalidTransformError" with catch_warnings(wcs.FITSFixedWarning) as w: run() assert len(w) == 2 for item in w: if 'unitfix' in str(item.message): assert 'Hz' in str(item.message) assert 'M/S' in str(item.message) assert 'm/s' in str(item.message) def test_outside_sky(): """ From github issue #107 """ header = get_pkg_data_contents( 'data/outside_sky.hdr', encoding='binary') w = wcs.WCS(header) assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0))) def test_pix2world(): """ From github issue #1463 """ # TODO: write this to test the expected output behavior of pix2world, # currently this just makes sure it doesn't error out in unexpected ways filename = get_pkg_data_filename('data/sip2.fits') with catch_warnings(wcs.wcs.FITSFixedWarning) as caught_warnings: # this raises a warning unimportant for this testing the pix2world # FITSFixedWarning(u'The WCS transformation has more axes (2) than the # image it is associated with (0)') ww = wcs.WCS(filename) # might as well monitor for changing behavior assert len(caught_warnings) == 1 n = 3 pixels = (np.arange(n) * np.ones((2, n))).T result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True) # Catch #2791 ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True) close_enough = 1e-8 # assuming that the data of sip2.fits doesn't change answer = np.array([[0.00024976, 0.00023018], [0.00023043, -0.00024997]]) assert np.all(np.abs(ww.wcs.pc - answer) < close_enough) answer = np.array([[202.39265216, 47.17756518], [202.39335826, 47.17754619], [202.39406436, 47.1775272]]) assert np.all(np.abs(result - answer) < close_enough) def test_load_fits_path(): fits_name = get_pkg_data_filename('data/sip.fits') w = wcs.WCS(fits_name) def test_dict_init(): """ Test that WCS can be initialized with a dict-like object """ # Dictionary with no actual WCS, returns identity transform w = wcs.WCS({}) xp, yp = w.wcs_world2pix(41., 2., 1) assert_array_almost_equal_nulp(xp, 41., 10) assert_array_almost_equal_nulp(yp, 2., 10) # Valid WCS w = wcs.WCS({'CTYPE1': 'GLON-CAR', 'CTYPE2': 'GLAT-CAR', 'CUNIT1': 'deg', 'CUNIT2': 'deg', 'CRPIX1': 1, 'CRPIX2': 1, 'CRVAL1': 40., 'CRVAL2': 0., 'CDELT1': -0.1, 'CDELT2': 0.1}) xp, yp = w.wcs_world2pix(41., 2., 0) assert_array_almost_equal_nulp(xp, -10., 10) assert_array_almost_equal_nulp(yp, 20., 10) @raises(TypeError) def test_extra_kwarg(): """ Issue #444 """ w = wcs.WCS() with NumpyRNGContext(123456789): data = np.random.rand(100, 2) w.wcs_pix2world(data, origin=1) def test_3d_shapes(): """ Issue #444 """ w = wcs.WCS(naxis=3) with NumpyRNGContext(123456789): data = np.random.rand(100, 3) result = w.wcs_pix2world(data, 1) assert result.shape == (100, 3) result = w.wcs_pix2world( data[..., 0], data[..., 1], data[..., 2], 1) assert len(result) == 3 def test_preserve_shape(): w = wcs.WCS(naxis=2) x = np.random.random((2, 3, 4)) y = np.random.random((2, 3, 4)) xw, yw = w.wcs_pix2world(x, y, 1) assert xw.shape == (2, 3, 4) assert yw.shape == (2, 3, 4) xp, yp = w.wcs_world2pix(x, y, 1) assert xp.shape == (2, 3, 4) assert yp.shape == (2, 3, 4) def test_broadcasting(): 
w = wcs.WCS(naxis=2) x = np.random.random((2, 3, 4)) y = 1 xp, yp = w.wcs_world2pix(x, y, 1) assert xp.shape == (2, 3, 4) assert yp.shape == (2, 3, 4) def test_shape_mismatch(): w = wcs.WCS(naxis=2) x = np.random.random((2, 3, 4)) y = np.random.random((3, 2, 4)) with pytest.raises(ValueError) as exc: xw, yw = w.wcs_pix2world(x, y, 1) assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other" with pytest.raises(ValueError) as exc: xp, yp = w.wcs_world2pix(x, y, 1) assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other" # There are some ambiguities that need to be worked around when # naxis == 1 w = wcs.WCS(naxis=1) x = np.random.random((42, 1)) xw = w.wcs_pix2world(x, 1) assert xw.shape == (42, 1) x = np.random.random((42,)) xw, = w.wcs_pix2world(x, 1) assert xw.shape == (42,) def test_invalid_shape(): # Issue #1395 w = wcs.WCS(naxis=2) xy = np.random.random((2, 3)) with pytest.raises(ValueError) as exc: xy2 = w.wcs_pix2world(xy, 1) assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)' xy = np.random.random((2, 1)) with pytest.raises(ValueError) as exc: xy2 = w.wcs_pix2world(xy, 1) assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)' def test_warning_about_defunct_keywords(): def run(): header = get_pkg_data_contents( 'data/defunct_keywords.hdr', encoding='binary') w = wcs.WCS(header) with catch_warnings(wcs.FITSFixedWarning) as w: run() assert len(w) == 4 for item in w: assert 'PCi_ja' in str(item.message) # Make sure the warnings come out every time... with catch_warnings(wcs.FITSFixedWarning) as w: run() assert len(w) == 4 for item in w: assert 'PCi_ja' in str(item.message) def test_warning_about_defunct_keywords_exception(): def run(): header = get_pkg_data_contents( 'data/defunct_keywords.hdr', encoding='binary') w = wcs.WCS(header) with pytest.raises(wcs.FITSFixedWarning): warnings.simplefilter("error", wcs.FITSFixedWarning) run() # Restore warnings filter to previous state warnings.simplefilter("default") def test_to_header_string(): header_string = """ WCSAXES = 2 / Number of coordinate axes CRPIX1 = 0.0 / Pixel coordinate of reference point CRPIX2 = 0.0 / Pixel coordinate of reference point CDELT1 = 1.0 / Coordinate increment at reference point CDELT2 = 1.0 / Coordinate increment at reference point CRVAL1 = 0.0 / Coordinate value at reference point CRVAL2 = 0.0 / Coordinate value at reference point LATPOLE = 90.0 / [deg] Native latitude of celestial pole END""" w = wcs.WCS() h0 = fits.Header.fromstring(w.to_header_string().strip()) if 'COMMENT' in h0: del h0['COMMENT'] if '' in h0: del h0[''] h1 = fits.Header.fromstring(header_string.strip()) assert dict(h0) == dict(h1) def test_to_fits(): w = wcs.WCS() header_string = w.to_header() wfits = w.to_fits() assert isinstance(wfits, fits.HDUList) assert isinstance(wfits[0], fits.PrimaryHDU) assert header_string == wfits[0].header[-8:] def test_to_header_warning(): fits_name = get_pkg_data_filename('data/sip.fits') x = wcs.WCS(fits_name) with catch_warnings() as w: x.to_header() assert len(w) == 1 assert 'A_ORDER' in str(w[0]) def test_no_comments_in_header(): w = wcs.WCS() header = w.to_header() assert w.wcs.alt not in header assert 'COMMENT' + w.wcs.alt.strip() not in header assert 'COMMENT' not in header wkey = 'P' header = w.to_header(key=wkey) assert wkey not in header assert 'COMMENT' not in header assert 'COMMENT' + w.wcs.alt.strip() not in header @raises(wcs.InvalidTransformError) def 
test_find_all_wcs_crash(): """ Causes a double free without a recent fix in wcslib_wrap.C """ with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd: header = fd.read() # We have to set fix=False here, because one of the fixing tasks is to # remove redundant SCAMP distortion parameters when SIP distortion # parameters are also present. wcses = wcs.find_all_wcs(header, fix=False) def test_validate(): with catch_warnings(): results = wcs.validate(get_pkg_data_filename("data/validate.fits")) results_txt = repr(results) version = wcs._wcs.__version__ if version[0] == '5': if version >= '5.13': filename = 'data/validate.5.13.txt' else: filename = 'data/validate.5.0.txt' else: filename = 'data/validate.txt' with open(get_pkg_data_filename(filename), "r") as fd: lines = fd.readlines() assert set([x.strip() for x in lines]) == set([ x.strip() for x in results_txt.splitlines()]) def test_validate_with_2_wcses(): # From Issue #2053 results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr")) assert "WCS key 'A':" in str(results) def test_all_world2pix(fname=None, ext=0, tolerance=1.0e-4, origin=0, random_npts=25000, adaptive=False, maxiter=20, detect_divergence=True): """Test all_world2pix, iterative inverse of all_pix2world""" # Open test FITS file: if fname is None: fname = get_pkg_data_filename('data/j94f05bgq_flt.fits') ext = ('SCI', 1) if not os.path.isfile(fname): raise OSError("Input file '{:s}' to 'test_all_world2pix' not found." .format(fname)) h = fits.open(fname) w = wcs.WCS(h[ext].header, h) h.close() del h crpix = w.wcs.crpix ncoord = crpix.shape[0] # Assume that CRPIX is at the center of the image and that the image has # a power-of-2 number of pixels along each axis. Only use the central # 1/64 for this testing purpose: naxesi_l = list((7. / 16 * crpix).astype(int)) naxesi_u = list((9. / 16 * crpix).astype(int)) # Generate integer indices of pixels (image grid): img_pix = np.dstack([i.flatten() for i in np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0] # Generage random data (in image coordinates): with NumpyRNGContext(123456789): rnd_pix = np.random.rand(random_npts, ncoord) # Scale random data to cover the central part of the image mwidth = 2 * (crpix * 1. / 8) rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix # Reference pixel coordinates in image coordinate system (CS): test_pix = np.append(img_pix, rnd_pix, axis=0) # Reference pixel coordinates in sky CS using forward transformation: all_world = w.all_pix2world(test_pix, origin) try: runtime_begin = datetime.now() # Apply the inverse iterative process to pixels in world coordinates # to recover the pixel coordinates in image space. 
all_pix = w.all_world2pix( all_world, origin, tolerance=tolerance, adaptive=adaptive, maxiter=maxiter, detect_divergence=detect_divergence) runtime_end = datetime.now() except wcs.wcs.NoConvergence as e: runtime_end = datetime.now() ndiv = 0 if e.divergent is not None: ndiv = e.divergent.shape[0] print("There are {} diverging solutions.".format(ndiv)) print("Indices of diverging solutions:\n{}" .format(e.divergent)) print("Diverging solutions:\n{}\n" .format(e.best_solution[e.divergent])) print("Mean radius of the diverging solutions: {}" .format(np.mean( np.linalg.norm(e.best_solution[e.divergent], axis=1)))) print("Mean accuracy of the diverging solutions: {}\n" .format(np.mean( np.linalg.norm(e.accuracy[e.divergent], axis=1)))) else: print("There are no diverging solutions.") nslow = 0 if e.slow_conv is not None: nslow = e.slow_conv.shape[0] print("There are {} slowly converging solutions." .format(nslow)) print("Indices of slowly converging solutions:\n{}" .format(e.slow_conv)) print("Slowly converging solutions:\n{}\n" .format(e.best_solution[e.slow_conv])) else: print("There are no slowly converging solutions.\n") print("There are {} converged solutions." .format(e.best_solution.shape[0] - ndiv - nslow)) print("Best solutions (all points):\n{}" .format(e.best_solution)) print("Accuracy:\n{}\n".format(e.accuracy)) print("\nFinished running 'test_all_world2pix' with errors.\n" "ERROR: {}\nRun time: {}\n" .format(e.args[0], runtime_end - runtime_begin)) raise e # Compute differences between reference pixel coordinates and # pixel coordinates (in image space) recovered from reference # pixels in world coordinates: errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1)) meanerr = np.mean(errors) maxerr = np.amax(errors) print("\nFinished running 'test_all_world2pix'.\n" "Mean error = {0:e} (Max error = {1:e})\n" "Run time: {2}\n" .format(meanerr, maxerr, runtime_end - runtime_begin)) assert(maxerr < 2.0 * tolerance) def test_scamp_sip_distortion_parameters(): """ Test parsing of WCS parameters with redundant SIP and SCAMP distortion parameters. """ header = get_pkg_data_contents('data/validate.fits', encoding='binary') w = wcs.WCS(header) # Just check that this doesn't raise an exception. 
w.all_pix2world(0, 0, 0) def test_fixes2(): """ From github issue #1854 """ header = get_pkg_data_contents( 'data/nonstandard_units.hdr', encoding='binary') with pytest.raises(wcs.InvalidTransformError): w = wcs.WCS(header, fix=False) def test_unit_normalization(): """ From github issue #1918 """ header = get_pkg_data_contents( 'data/unit.hdr', encoding='binary') w = wcs.WCS(header) assert w.wcs.cunit[2] == 'm/s' def test_footprint_to_file(tmpdir): """ From github issue #1912 """ # Arbitrary keywords from real data w = wcs.WCS({'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg', 'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02, 'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg', 'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01, 'PV2_1': 1., 'PV2_3': 220.}) testfile = str(tmpdir.join('test.txt')) w.footprint_to_file(testfile) with open(testfile, 'r') as f: lines = f.readlines() assert len(lines) == 4 assert lines[2] == 'ICRS\n' assert 'color=green' in lines[3] w.footprint_to_file(testfile, coordsys='FK5', color='red') with open(testfile, 'r') as f: lines = f.readlines() assert len(lines) == 4 assert lines[2] == 'FK5\n' assert 'color=red' in lines[3] with pytest.raises(ValueError): w.footprint_to_file(testfile, coordsys='FOO') def test_validate_faulty_wcs(): """ From github issue #2053 """ h = fits.Header() # Illegal WCS: h['RADESYSA'] = 'ICRS' h['PV2_1'] = 1.0 hdu = fits.PrimaryHDU([[0]], header=h) hdulist = fits.HDUList([hdu]) # Check that this doesn't raise a NameError exception: wcs.validate(hdulist) def test_error_message(): header = get_pkg_data_contents( 'data/invalid_header.hdr', encoding='binary') with pytest.raises(wcs.InvalidTransformError): # Both lines are in here, because 0.4 calls .set within WCS.__init__, # whereas 0.3 and earlier did not. w = wcs.WCS(header, _do_set=False) c = w.all_pix2world([[536.0, 894.0]], 0) def test_out_of_bounds(): # See #2107 header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary') w = wcs.WCS(header) ra, dec = w.wcs_pix2world(110, 110, 0) assert np.isnan(ra) assert np.isnan(dec) ra, dec = w.wcs_pix2world(0, 0, 0) assert not np.isnan(ra) assert not np.isnan(dec) def test_calc_footprint_1(): fits = get_pkg_data_filename('data/sip.fits') w = wcs.WCS(fits) axes = (1000, 1051) ref = np.array([[202.39314493, 47.17753352], [202.71885939, 46.94630488], [202.94631893, 47.15855022], [202.72053428, 47.37893142]]) footprint = w.calc_footprint(axes=axes) assert_allclose(footprint, ref) def test_calc_footprint_2(): """ Test calc_footprint without distortion. 
""" fits = get_pkg_data_filename('data/sip.fits') w = wcs.WCS(fits) axes = (1000, 1051) ref = np.array([[202.39265216, 47.17756518], [202.7469062, 46.91483312], [203.11487481, 47.14359319], [202.76092671, 47.40745948]]) footprint = w.calc_footprint(axes=axes, undistort=False) assert_allclose(footprint, ref) def test_calc_footprint_3(): """ Test calc_footprint with corner of the pixel.""" w = wcs.WCS() w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"] w.wcs.crpix = [1.5, 5.5] w.wcs.cdelt = [-0.1, 0.1] axes = (2, 10) ref = np.array([[0.1, -0.5], [0.1, 0.5], [359.9, 0.5], [359.9, -0.5]]) footprint = w.calc_footprint(axes=axes, undistort=False, center=False) assert_allclose(footprint, ref) def test_sip(): # See #2107 header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary') w = wcs.WCS(header) x0, y0 = w.sip_pix2foc(200, 200, 0) assert_allclose(72, x0, 1e-3) assert_allclose(72, y0, 1e-3) x1, y1 = w.sip_foc2pix(x0, y0, 0) assert_allclose(200, x1, 1e-3) assert_allclose(200, y1, 1e-3) def test_printwcs(): """ Just make sure that it runs """ h = get_pkg_data_contents('spectra/orion-freq-1.hdr', encoding='binary') w = wcs.WCS(h) w.printwcs() h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary') w = wcs.WCS(h) w.printwcs() def test_invalid_spherical(): header = """ SIMPLE = T / conforms to FITS standard BITPIX = 8 / array data type WCSAXES = 2 / no comment CTYPE1 = 'RA---TAN' / TAN (gnomic) projection CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection EQUINOX = 2000.0 / Equatorial coordinates definition (yr) LONPOLE = 180.0 / no comment LATPOLE = 0.0 / no comment CRVAL1 = 16.0531567459 / RA of reference point CRVAL2 = 23.1148929108 / DEC of reference point CRPIX1 = 2129 / X reference pixel CRPIX2 = 1417 / Y reference pixel CUNIT1 = 'deg ' / X pixel scale units CUNIT2 = 'deg ' / Y pixel scale units CD1_1 = -0.00912247310646 / Transformation matrix CD1_2 = -0.00250608809647 / no comment CD2_1 = 0.00250608809647 / no comment CD2_2 = -0.00912247310646 / no comment IMAGEW = 4256 / Image width, in pixels. IMAGEH = 2832 / Image height, in pixels. 
""" f = io.StringIO(header) header = fits.Header.fromtextfile(f) w = wcs.WCS(header) x, y = w.wcs_world2pix(211, -26, 0) assert np.isnan(x) and np.isnan(y) def test_no_iteration(): # Regression test for #3066 w = wcs.WCS(naxis=2) with pytest.raises(TypeError) as exc: iter(w) assert exc.value.args[0] == "'WCS' object is not iterable" class NewWCS(wcs.WCS): pass w = NewWCS(naxis=2) with pytest.raises(TypeError) as exc: iter(w) assert exc.value.args[0] == "'NewWCS' object is not iterable" @pytest.mark.skipif('_wcs.__version__[0] < "5"', reason="TPV only works with wcslib 5.x or later") def test_sip_tpv_agreement(): sip_header = get_pkg_data_contents( os.path.join("data", "siponly.hdr"), encoding='binary') tpv_header = get_pkg_data_contents( os.path.join("data", "tpvonly.hdr"), encoding='binary') w_sip = wcs.WCS(sip_header) w_tpv = wcs.WCS(tpv_header) assert_array_almost_equal( w_sip.all_pix2world([w_sip.wcs.crpix], 1), w_tpv.all_pix2world([w_tpv.wcs.crpix], 1)) w_sip2 = wcs.WCS(w_sip.to_header()) w_tpv2 = wcs.WCS(w_tpv.to_header()) assert_array_almost_equal( w_sip.all_pix2world([w_sip.wcs.crpix], 1), w_sip2.all_pix2world([w_sip.wcs.crpix], 1)) assert_array_almost_equal( w_tpv.all_pix2world([w_sip.wcs.crpix], 1), w_tpv2.all_pix2world([w_sip.wcs.crpix], 1)) assert_array_almost_equal( w_sip2.all_pix2world([w_sip.wcs.crpix], 1), w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1)) @pytest.mark.skipif('_wcs.__version__[0] < "5"', reason="TPV only works with wcslib 5.x or later") def test_tpv_copy(): # See #3904 tpv_header = get_pkg_data_contents( os.path.join("data", "tpvonly.hdr"), encoding='binary') w_tpv = wcs.WCS(tpv_header) ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0) assert ra[0] != ra[1] and ra[1] != ra[2] assert dec[0] != dec[1] and dec[1] != dec[2] def test_hst_wcs(): path = get_pkg_data_filename("data/dist_lookup.fits.gz") hdulist = fits.open(path) # wcslib will complain about the distortion parameters if they # weren't correctly deleted from the header w = wcs.WCS(hdulist[1].header, hdulist) # Exercise the main transformation functions, mainly just for # coverage w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0) w.det2im([0, 100, 200], [0, -100, 200], 0) w.cpdis1 = w.cpdis1 w.cpdis2 = w.cpdis2 w.det2im1 = w.det2im1 w.det2im2 = w.det2im2 w.sip = w.sip w.cpdis1.cdelt = w.cpdis1.cdelt w.cpdis1.crpix = w.cpdis1.crpix w.cpdis1.crval = w.cpdis1.crval w.cpdis1.data = w.cpdis1.data assert w.sip.a_order == 4 assert w.sip.b_order == 4 assert w.sip.ap_order == 0 assert w.sip.bp_order == 0 assert_array_equal(w.sip.crpix, [2048., 1024.]) wcs.WCS(hdulist[1].header, hdulist) hdulist.close() def test_list_naxis(): path = get_pkg_data_filename("data/dist_lookup.fits.gz") hdulist = fits.open(path) # wcslib will complain about the distortion parameters if they # weren't correctly deleted from the header w = wcs.WCS(hdulist[1].header, hdulist, naxis=['celestial']) assert w.naxis == 2 assert w.wcs.naxis == 2 path = get_pkg_data_filename("maps/1904-66_SIN.hdr") with open(path, 'rb') as fd: content = fd.read() w = wcs.WCS(content, naxis=['celestial']) assert w.naxis == 2 assert w.wcs.naxis == 2 w = wcs.WCS(content, naxis=['spectral']) assert w.naxis == 0 assert w.wcs.naxis == 0 hdulist.close() def test_sip_broken(): # This header caused wcslib to segfault because it has a SIP # specification in a non-default keyword hdr = get_pkg_data_contents("data/sip-broken.hdr") w = wcs.WCS(hdr) def test_no_truncate_crval(): """ Regression test for https://github.com/astropy/astropy/issues/4612 """ w = 
wcs.WCS(naxis=3) w.wcs.crval = [50, 50, 2.12345678e11] w.wcs.cdelt = [1e-3, 1e-3, 1e8] w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ'] w.wcs.set() header = w.to_header() for ii in range(3): assert header['CRVAL{0}'.format(ii + 1)] == w.wcs.crval[ii] assert header['CDELT{0}'.format(ii + 1)] == w.wcs.cdelt[ii] def test_no_truncate_crval_try2(): """ Regression test for https://github.com/astropy/astropy/issues/4612 """ w = wcs.WCS(naxis=3) w.wcs.crval = [50, 50, 2.12345678e11] w.wcs.cdelt = [1e-5, 1e-5, 1e5] w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ'] w.wcs.cunit = ['deg', 'deg', 'Hz'] w.wcs.crpix = [1, 1, 1] w.wcs.restfrq = 2.34e11 w.wcs.set() header = w.to_header() for ii in range(3): assert header['CRVAL{0}'.format(ii + 1)] == w.wcs.crval[ii] assert header['CDELT{0}'.format(ii + 1)] == w.wcs.cdelt[ii] def test_no_truncate_crval_p17(): """ Regression test for https://github.com/astropy/astropy/issues/5162 """ w = wcs.WCS(naxis=2) w.wcs.crval = [50.1234567890123456, 50.1234567890123456] w.wcs.cdelt = [1e-3, 1e-3] w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] w.wcs.set() header = w.to_header() assert header['CRVAL1'] != w.wcs.crval[0] assert header['CRVAL2'] != w.wcs.crval[1] header = w.to_header(relax=wcs.WCSHDO_P17) assert header['CRVAL1'] == w.wcs.crval[0] assert header['CRVAL2'] == w.wcs.crval[1] def test_no_truncate_using_compare(): """ Regression test for https://github.com/astropy/astropy/issues/4612 This one uses WCS.wcs.compare and some slightly different values """ w = wcs.WCS(naxis=3) w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11] w.wcs.cdelt = [1e-3, 1e-3, 1e8] w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ'] w.wcs.set() w2 = wcs.WCS(w.to_header()) w.wcs.compare(w2.wcs) def test_passing_ImageHDU(): """ Passing ImageHDU or PrimaryHDU and comparing it with wcs initialized from header. For #4493. """ path = get_pkg_data_filename('data/validate.fits') hdulist = fits.open(path) wcs_hdu = wcs.WCS(hdulist[0]) wcs_header = wcs.WCS(hdulist[0].header) assert wcs_hdu.wcs.compare(wcs_header.wcs) wcs_hdu = wcs.WCS(hdulist[1]) wcs_header = wcs.WCS(hdulist[1].header) assert wcs_hdu.wcs.compare(wcs_header.wcs) hdulist.close() def test_inconsistent_sip(): """ Test for #4814 """ hdr = get_pkg_data_contents("data/sip-broken.hdr") w = wcs.WCS(hdr) newhdr = w.to_header(relax=None) # CTYPE should not include "-SIP" if relax is None wnew = wcs.WCS(newhdr) assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype) newhdr = w.to_header(relax=False) assert('A_0_2' not in newhdr) # CTYPE should not include "-SIP" if relax is False wnew = wcs.WCS(newhdr) assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype) newhdr = w.to_header(key="C") assert('A_0_2' not in newhdr) # Test writing header with a different key wnew = wcs.WCS(newhdr, key='C') assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype) newhdr = w.to_header(key=" ") # Test writing a primary WCS to header wnew = wcs.WCS(newhdr) assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype) # Test that "-SIP" is kept into CTYPE if relax=True and # "-SIP" was in the original header newhdr = w.to_header(relax=True) wnew = wcs.WCS(newhdr) assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype) assert('A_0_2' in newhdr) # Test that SIP coefficients are also written out. assert wnew.sip is not None # ######### broken header ########### # Test that "-SIP" is added to CTYPE if relax=True and # "-SIP" was not in the original header but SIP coefficients # are present. 
w = wcs.WCS(hdr) w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] newhdr = w.to_header(relax=True) wnew = wcs.WCS(newhdr) assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype) def test_bounds_check(): """Test for #4957""" w = wcs.WCS(naxis=2) w.wcs.ctype = ["RA---CAR", "DEC--CAR"] w.wcs.cdelt = [10, 10] w.wcs.crval = [-90, 90] w.wcs.crpix = [1, 1] w.wcs.bounds_check(False, False) ra, dec = w.wcs_pix2world(300, 0, 0) assert_allclose(ra, -180) assert_allclose(dec, -30) def test_naxis(): w = wcs.WCS(naxis=2) w.wcs.crval = [1, 1] w.wcs.cdelt = [0.1, 0.1] w.wcs.crpix = [1, 1] w._naxis = [1000, 500] assert w._naxis1 == 1000 assert w._naxis2 == 500 w._naxis1 = 99 w._naxis2 = 59 assert w._naxis == [99, 59] def test_sip_with_altkey(): """ Test that when creating a WCS object using a key, CTYPE with that key is looked at and not the primary CTYPE. fix for #5443. """ with fits.open(get_pkg_data_filename('data/sip.fits')) as f: w = wcs.WCS(f[0].header) # create a header with two WCSs. h1 = w.to_header(relax=True, key='A') h2 = w.to_header(relax=False) h1['CTYPE1A'] = "RA---SIN-SIP" h1['CTYPE2A'] = "DEC--SIN-SIP" h1.update(h2) w = wcs.WCS(h1, key='A') assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all() def test_to_fits_1(): """ Test to_fits() with LookupTable distortion. """ fits_name = get_pkg_data_filename('data/dist.fits') w = wcs.WCS(fits_name) wfits = w.to_fits() assert isinstance(wfits, fits.HDUList) assert isinstance(wfits[0], fits.PrimaryHDU) assert isinstance(wfits[1], fits.ImageHDU) def test_keyedsip(): """ Test sip reading with extra key. """ hdr_name = get_pkg_data_filename('data/sip-broken.hdr') header = fits.Header.fromfile(hdr_name) del header[str("CRPIX1")] del header[str("CRPIX2")] w=wcs.WCS(header=header,key="A") assert isinstance( w.sip, wcs.Sip ) assert w.sip.crpix[0] == 2048 assert w.sip.crpix[1] == 1026
81d8fcc0d26a8dae46f315bf643aa097e4d7c86f2243464dc545af8295117e82
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import pickle import numpy as np from numpy.testing import assert_array_almost_equal from ...utils.data import get_pkg_data_contents, get_pkg_data_fileobj from ...utils.misc import NumpyRNGContext from ...io import fits from ... import wcs def test_basic(): wcs1 = wcs.WCS() s = pickle.dumps(wcs1) wcs2 = pickle.loads(s) def test_dist(): with get_pkg_data_fileobj( os.path.join("data", "dist.fits"), encoding='binary') as test_file: hdulist = fits.open(test_file) wcs1 = wcs.WCS(hdulist[0].header, hdulist) assert wcs1.det2im2 is not None s = pickle.dumps(wcs1) wcs2 = pickle.loads(s) with NumpyRNGContext(123456789): x = np.random.rand(2 ** 16, wcs1.wcs.naxis) world1 = wcs1.all_pix2world(x, 1) world2 = wcs2.all_pix2world(x, 1) assert_array_almost_equal(world1, world2) def test_sip(): with get_pkg_data_fileobj( os.path.join("data", "sip.fits"), encoding='binary') as test_file: hdulist = fits.open(test_file, ignore_missing_end=True) wcs1 = wcs.WCS(hdulist[0].header) assert wcs1.sip is not None s = pickle.dumps(wcs1) wcs2 = pickle.loads(s) with NumpyRNGContext(123456789): x = np.random.rand(2 ** 16, wcs1.wcs.naxis) world1 = wcs1.all_pix2world(x, 1) world2 = wcs2.all_pix2world(x, 1) assert_array_almost_equal(world1, world2) def test_sip2(): with get_pkg_data_fileobj( os.path.join("data", "sip2.fits"), encoding='binary') as test_file: hdulist = fits.open(test_file, ignore_missing_end=True) wcs1 = wcs.WCS(hdulist[0].header) assert wcs1.sip is not None s = pickle.dumps(wcs1) wcs2 = pickle.loads(s) with NumpyRNGContext(123456789): x = np.random.rand(2 ** 16, wcs1.wcs.naxis) world1 = wcs1.all_pix2world(x, 1) world2 = wcs2.all_pix2world(x, 1) assert_array_almost_equal(world1, world2) def test_wcs(): header = get_pkg_data_contents( os.path.join("data", "outside_sky.hdr"), encoding='binary') wcs1 = wcs.WCS(header) s = pickle.dumps(wcs1) wcs2 = pickle.loads(s) with NumpyRNGContext(123456789): x = np.random.rand(2 ** 16, wcs1.wcs.naxis) world1 = wcs1.all_pix2world(x, 1) world2 = wcs2.all_pix2world(x, 1) assert_array_almost_equal(world1, world2) class Sub(wcs.WCS): def __init__(self, *args, **kwargs): self.foo = 42 def test_subclass(): wcs = Sub() s = pickle.dumps(wcs) wcs2 = pickle.loads(s) assert isinstance(wcs2, Sub) assert wcs.foo == 42 assert wcs2.foo == 42 assert wcs2.wcs is not None
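
# Editorial sketch, not part of the original test module: every test above
# follows the same pattern -- build a WCS, round-trip it through
# pickle.dumps/pickle.loads, and check the copy behaves like the original.
# Condensed to its core (attribute checks only, no FITS data needed):
def _pickle_roundtrip_example():
    w = wcs.WCS(naxis=2)
    w.wcs.crval = [10.0, 20.0]
    w2 = pickle.loads(pickle.dumps(w))
    assert w2.wcs.naxis == 2
    assert np.all(w2.wcs.crval == [10.0, 20.0])
    return w2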
0fd21ebac3ea3f5fe9e624349e642896d370642d9cb437023b74140e6b828dbd
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import gc import locale import re import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal import numpy as np from ...tests.helper import raises, catch_warnings from ...io import fits from .. import wcs from .. import _wcs from ...utils.data import get_pkg_data_contents, get_pkg_data_fileobj, get_pkg_data_filename from ... import units as u ###################################################################### def test_alt(): w = _wcs.Wcsprm() assert w.alt == " " w.alt = "X" assert w.alt == "X" del w.alt assert w.alt == " " @raises(ValueError) def test_alt_invalid1(): w = _wcs.Wcsprm() w.alt = "$" @raises(ValueError) def test_alt_invalid2(): w = _wcs.Wcsprm() w.alt = " " def test_axis_types(): w = _wcs.Wcsprm() assert_array_equal(w.axis_types, [0, 0]) def test_cd(): w = _wcs.Wcsprm() w.cd = [[1, 0], [0, 1]] assert w.cd.dtype == float assert w.has_cd() is True assert_array_equal(w.cd, [[1, 0], [0, 1]]) del w.cd assert w.has_cd() is False @raises(AttributeError) def test_cd_missing(): w = _wcs.Wcsprm() assert w.has_cd() is False w.cd @raises(AttributeError) def test_cd_missing2(): w = _wcs.Wcsprm() w.cd = [[1, 0], [0, 1]] assert w.has_cd() is True del w.cd assert w.has_cd() is False w.cd @raises(ValueError) def test_cd_invalid(): w = _wcs.Wcsprm() w.cd = [1, 0, 0, 1] def test_cdfix(): w = _wcs.Wcsprm() w.cdfix() def test_cdelt(): w = _wcs.Wcsprm() assert_array_equal(w.cdelt, [1, 1]) w.cdelt = [42, 54] assert_array_equal(w.cdelt, [42, 54]) @raises(TypeError) def test_cdelt_delete(): w = _wcs.Wcsprm() del w.cdelt def test_cel_offset(): w = _wcs.Wcsprm() assert w.cel_offset is False w.cel_offset = 'foo' assert w.cel_offset is True w.cel_offset = 0 assert w.cel_offset is False def test_celfix(): # TODO: We need some data with -NCP or -GLS projections to test # with. 
For now, this is just a smoke test w = _wcs.Wcsprm() assert w.celfix() == -1 def test_cname(): w = _wcs.Wcsprm() # Test that this works as an iterator for x in w.cname: assert x == '' assert list(w.cname) == ['', ''] w.cname = [b'foo', 'bar'] assert list(w.cname) == ['foo', 'bar'] @raises(TypeError) def test_cname_invalid(): w = _wcs.Wcsprm() w.cname = [42, 54] def test_colax(): w = _wcs.Wcsprm() assert w.colax.dtype == np.intc assert_array_equal(w.colax, [0, 0]) w.colax = [42, 54] assert_array_equal(w.colax, [42, 54]) w.colax[0] = 0 assert_array_equal(w.colax, [0, 54]) with pytest.raises(ValueError): w.colax = [1, 2, 3] def test_colnum(): w = _wcs.Wcsprm() assert w.colnum == 0 w.colnum = 42 assert w.colnum == 42 with pytest.raises(OverflowError): w.colnum = 0xffffffffffffffffffff with pytest.raises(OverflowError): w.colnum = 0xffffffff with pytest.raises(TypeError): del w.colnum @raises(TypeError) def test_colnum_invalid(): w = _wcs.Wcsprm() w.colnum = 'foo' def test_crder(): w = _wcs.Wcsprm() assert w.crder.dtype == float assert np.all(np.isnan(w.crder)) w.crder[0] = 0 assert np.isnan(w.crder[1]) assert w.crder[0] == 0 w.crder = w.crder def test_crota(): w = _wcs.Wcsprm() w.crota = [1, 0] assert w.crota.dtype == float assert w.has_crota() is True assert_array_equal(w.crota, [1, 0]) del w.crota assert w.has_crota() is False @raises(AttributeError) def test_crota_missing(): w = _wcs.Wcsprm() assert w.has_crota() is False w.crota @raises(AttributeError) def test_crota_missing2(): w = _wcs.Wcsprm() w.crota = [1, 0] assert w.has_crota() is True del w.crota assert w.has_crota() is False w.crota def test_crpix(): w = _wcs.Wcsprm() assert w.crpix.dtype == float assert_array_equal(w.crpix, [0, 0]) w.crpix = [42, 54] assert_array_equal(w.crpix, [42, 54]) w.crpix[0] = 0 assert_array_equal(w.crpix, [0, 54]) with pytest.raises(ValueError): w.crpix = [1, 2, 3] def test_crval(): w = _wcs.Wcsprm() assert w.crval.dtype == float assert_array_equal(w.crval, [0, 0]) w.crval = [42, 54] assert_array_equal(w.crval, [42, 54]) w.crval[0] = 0 assert_array_equal(w.crval, [0, 54]) def test_csyer(): w = _wcs.Wcsprm() assert w.csyer.dtype == float assert np.all(np.isnan(w.csyer)) w.csyer[0] = 0 assert np.isnan(w.csyer[1]) assert w.csyer[0] == 0 w.csyer = w.csyer def test_ctype(): w = _wcs.Wcsprm() assert list(w.ctype) == ['', ''] w.ctype = [b'RA---TAN', 'DEC--TAN'] assert_array_equal(w.axis_types, [2200, 2201]) assert w.lat == 1 assert w.lng == 0 assert w.lattyp == 'DEC' assert w.lngtyp == 'RA' assert list(w.ctype) == ['RA---TAN', 'DEC--TAN'] w.ctype = ['foo', 'bar'] assert_array_equal(w.axis_types, [0, 0]) assert list(w.ctype) == ['foo', 'bar'] assert w.lat == -1 assert w.lng == -1 assert w.lattyp == 'DEC' assert w.lngtyp == 'RA' def test_ctype_repr(): w = _wcs.Wcsprm() assert list(w.ctype) == ['', ''] w.ctype = [b'RA-\t--TAN', 'DEC-\n-TAN'] assert repr(w.ctype == '["RA-\t--TAN", "DEC-\n-TAN"]') def test_ctype_index_error(): w = _wcs.Wcsprm() assert list(w.ctype) == ['', ''] with pytest.raises(IndexError): w.ctype[2] = 'FOO' def test_ctype_invalid_error(): w = _wcs.Wcsprm() assert list(w.ctype) == ['', ''] with pytest.raises(ValueError): w.ctype[0] = 'X' * 100 with pytest.raises(TypeError): w.ctype[0] = True with pytest.raises(TypeError): w.ctype = ['a', 0] with pytest.raises(TypeError): w.ctype = None with pytest.raises(ValueError): w.ctype = ['a', 'b', 'c'] with pytest.raises(ValueError): w.ctype = ['FOO', 'A' * 100] def test_cubeface(): w = _wcs.Wcsprm() assert w.cubeface == -1 w.cubeface = 0 with 
pytest.raises(OverflowError): w.cubeface = -1 def test_cunit(): w = _wcs.Wcsprm() assert list(w.cunit) == [u.Unit(''), u.Unit('')] w.cunit = [u.m, 'km'] assert w.cunit[0] == u.m assert w.cunit[1] == u.km def test_cunit_invalid(): w = _wcs.Wcsprm() with catch_warnings() as warns: w.cunit[0] = 'foo' assert len(warns) == 1 assert 'foo' in str(warns[0].message) def test_cunit_invalid2(): w = _wcs.Wcsprm() with catch_warnings() as warns: w.cunit = ['foo', 'bar'] assert len(warns) == 2 assert 'foo' in str(warns[0].message) assert 'bar' in str(warns[1].message) def test_unit(): w = wcs.WCS() w.wcs.cunit[0] = u.erg assert w.wcs.cunit[0] == u.erg assert repr(w.wcs.cunit) == "['erg', '']" def test_unit2(): w = wcs.WCS() myunit = u.Unit("FOOBAR", parse_strict="warn") w.wcs.cunit[0] = myunit def test_unit3(): w = wcs.WCS() with pytest.raises(IndexError): w.wcs.cunit[2] = u.m with pytest.raises(ValueError): w.wcs.cunit = [u.m, u.m, u.m] def test_unitfix(): w = _wcs.Wcsprm() w.unitfix() def test_cylfix(): # TODO: We need some data with broken cylindrical projections to # test with. For now, this is just a smoke test. w = _wcs.Wcsprm() assert w.cylfix() == -1 assert w.cylfix([0, 1]) == -1 with pytest.raises(ValueError): w.cylfix([0, 1, 2]) def test_dateavg(): w = _wcs.Wcsprm() assert w.dateavg == '' # TODO: When dateavg is verified, check that it works def test_dateobs(): w = _wcs.Wcsprm() assert w.dateobs == '' # TODO: When dateavg is verified, check that it works def test_datfix(): w = _wcs.Wcsprm() w.dateobs = '31/12/99' assert w.datfix() == 0 assert w.dateobs == '1999-12-31' assert w.mjdobs == 51543.0 def test_equinox(): w = _wcs.Wcsprm() assert np.isnan(w.equinox) w.equinox = 0 assert w.equinox == 0 del w.equinox assert np.isnan(w.equinox) with pytest.raises(TypeError): w.equinox = None def test_fix(): w = _wcs.Wcsprm() assert w.fix() == { 'cdfix': 'No change', 'cylfix': 'No change', 'datfix': 'No change', 'spcfix': 'No change', 'unitfix': 'No change', 'celfix': 'No change'} def test_fix2(): w = _wcs.Wcsprm() w.dateobs = '31/12/99' assert w.fix() == { 'cdfix': 'No change', 'cylfix': 'No change', 'datfix': "Changed '31/12/99' to '1999-12-31'", 'spcfix': 'No change', 'unitfix': 'No change', 'celfix': 'No change'} assert w.dateobs == '1999-12-31' assert w.mjdobs == 51543.0 def test_fix3(): w = _wcs.Wcsprm() w.dateobs = '31/12/F9' assert w.fix() == { 'cdfix': 'No change', 'cylfix': 'No change', 'datfix': "Invalid parameter value: invalid date '31/12/F9'", 'spcfix': 'No change', 'unitfix': 'No change', 'celfix': 'No change'} assert w.dateobs == '31/12/F9' assert np.isnan(w.mjdobs) def test_fix4(): w = _wcs.Wcsprm() with pytest.raises(ValueError): w.fix('X') def test_fix5(): w = _wcs.Wcsprm() with pytest.raises(ValueError): w.fix(naxis=[0, 1, 2]) def test_get_ps(): # TODO: We need some data with PSi_ma keywords w = _wcs.Wcsprm() assert len(w.get_ps()) == 0 def test_get_pv(): # TODO: We need some data with PVi_ma keywords w = _wcs.Wcsprm() assert len(w.get_pv()) == 0 @raises(AssertionError) def test_imgpix_matrix(): w = _wcs.Wcsprm() w.imgpix_matrix @raises(AttributeError) def test_imgpix_matrix2(): w = _wcs.Wcsprm() w.imgpix_matrix = None def test_isunity(): w = _wcs.Wcsprm() assert(w.is_unity()) def test_lat(): w = _wcs.Wcsprm() assert w.lat == -1 @raises(AttributeError) def test_lat_set(): w = _wcs.Wcsprm() w.lat = 0 def test_latpole(): w = _wcs.Wcsprm() assert w.latpole == 90.0 w.latpole = 45.0 assert w.latpole == 45.0 del w.latpole assert w.latpole == 90.0 def test_lattyp(): w = _wcs.Wcsprm() 
print(repr(w.lattyp)) assert w.lattyp == " " @raises(AttributeError) def test_lattyp_set(): w = _wcs.Wcsprm() w.lattyp = 0 def test_lng(): w = _wcs.Wcsprm() assert w.lng == -1 @raises(AttributeError) def test_lng_set(): w = _wcs.Wcsprm() w.lng = 0 def test_lngtyp(): w = _wcs.Wcsprm() assert w.lngtyp == " " @raises(AttributeError) def test_lngtyp_set(): w = _wcs.Wcsprm() w.lngtyp = 0 def test_lonpole(): w = _wcs.Wcsprm() assert np.isnan(w.lonpole) w.lonpole = 45.0 assert w.lonpole == 45.0 del w.lonpole assert np.isnan(w.lonpole) def test_mix(): w = _wcs.Wcsprm() w.ctype = [b'RA---TAN', 'DEC--TAN'] with pytest.raises(_wcs.InvalidCoordinateError): w.mix(1, 1, [240, 480], 1, 5, [0, 2], [54, 32], 1) def test_mjdavg(): w = _wcs.Wcsprm() assert np.isnan(w.mjdavg) w.mjdavg = 45.0 assert w.mjdavg == 45.0 del w.mjdavg assert np.isnan(w.mjdavg) def test_mjdobs(): w = _wcs.Wcsprm() assert np.isnan(w.mjdobs) w.mjdobs = 45.0 assert w.mjdobs == 45.0 del w.mjdobs assert np.isnan(w.mjdobs) def test_name(): w = _wcs.Wcsprm() assert w.name == '' w.name = 'foo' assert w.name == 'foo' def test_naxis(): w = _wcs.Wcsprm() assert w.naxis == 2 @raises(AttributeError) def test_naxis_set(): w = _wcs.Wcsprm() w.naxis = 4 def test_obsgeo(): w = _wcs.Wcsprm() assert np.all(np.isnan(w.obsgeo)) w.obsgeo = [1, 2, 3] assert_array_equal(w.obsgeo, [1, 2, 3]) del w.obsgeo assert np.all(np.isnan(w.obsgeo)) def test_pc(): w = _wcs.Wcsprm() assert w.has_pc() assert_array_equal(w.pc, [[1, 0], [0, 1]]) w.cd = [[1, 0], [0, 1]] assert not w.has_pc() del w.cd assert w.has_pc() assert_array_equal(w.pc, [[1, 0], [0, 1]]) w.pc = w.pc @raises(AttributeError) def test_pc_missing(): w = _wcs.Wcsprm() w.cd = [[1, 0], [0, 1]] assert not w.has_pc() w.pc def test_phi0(): w = _wcs.Wcsprm() assert np.isnan(w.phi0) w.phi0 = 42.0 assert w.phi0 == 42.0 del w.phi0 assert np.isnan(w.phi0) @raises(AssertionError) def test_piximg_matrix(): w = _wcs.Wcsprm() w.piximg_matrix @raises(AttributeError) def test_piximg_matrix2(): w = _wcs.Wcsprm() w.piximg_matrix = None def test_print_contents(): # In general, this is human-consumable, so we don't care if the # content changes, just check the type w = _wcs.Wcsprm() assert isinstance(str(w), str) def test_radesys(): w = _wcs.Wcsprm() assert w.radesys == '' w.radesys = 'foo' assert w.radesys == 'foo' def test_restfrq(): w = _wcs.Wcsprm() assert w.restfrq == 0.0 w.restfrq = np.nan assert np.isnan(w.restfrq) del w.restfrq def test_restwav(): w = _wcs.Wcsprm() assert w.restwav == 0.0 w.restwav = np.nan assert np.isnan(w.restwav) del w.restwav def test_set_ps(): w = _wcs.Wcsprm() data = [(0, 0, "param1"), (1, 1, "param2")] w.set_ps(data) assert w.get_ps() == data def test_set_ps_realloc(): w = _wcs.Wcsprm() w.set_ps([(0, 0, "param1")] * 16) def test_set_pv(): w = _wcs.Wcsprm() data = [(0, 0, 42.), (1, 1, 54.)] w.set_pv(data) assert w.get_pv() == data def test_set_pv_realloc(): w = _wcs.Wcsprm() w.set_pv([(0, 0, 42.)] * 16) def test_spcfix(): # TODO: We need some data with broken spectral headers here to # really test header = get_pkg_data_contents( 'spectra/orion-velo-1.hdr', encoding='binary') w = _wcs.Wcsprm(header) assert w.spcfix() == -1 def test_spec(): w = _wcs.Wcsprm() assert w.spec == -1 @raises(AttributeError) def test_spec_set(): w = _wcs.Wcsprm() w.spec = 0 def test_specsys(): w = _wcs.Wcsprm() assert w.specsys == '' w.specsys = 'foo' assert w.specsys == 'foo' def test_sptr(): # TODO: Write me pass def test_ssysobs(): w = _wcs.Wcsprm() assert w.ssysobs == '' w.ssysobs = 'foo' assert w.ssysobs == 'foo' 
def test_ssyssrc(): w = _wcs.Wcsprm() assert w.ssyssrc == '' w.ssyssrc = 'foo' assert w.ssyssrc == 'foo' def test_tab(): w = _wcs.Wcsprm() assert len(w.tab) == 0 # TODO: Inject some headers that have tables and test def test_theta0(): w = _wcs.Wcsprm() assert np.isnan(w.theta0) w.theta0 = 42.0 assert w.theta0 == 42.0 del w.theta0 assert np.isnan(w.theta0) def test_toheader(): w = _wcs.Wcsprm() assert isinstance(w.to_header(), str) def test_velangl(): w = _wcs.Wcsprm() assert np.isnan(w.velangl) w.velangl = 42.0 assert w.velangl == 42.0 del w.velangl assert np.isnan(w.velangl) def test_velosys(): w = _wcs.Wcsprm() assert np.isnan(w.velosys) w.velosys = 42.0 assert w.velosys == 42.0 del w.velosys assert np.isnan(w.velosys) def test_velref(): w = _wcs.Wcsprm() assert w.velref == 0.0 w.velref = 42.0 assert w.velref == 42.0 del w.velref assert w.velref == 0.0 def test_zsource(): w = _wcs.Wcsprm() assert np.isnan(w.zsource) w.zsource = 42.0 assert w.zsource == 42.0 del w.zsource assert np.isnan(w.zsource) def test_cd_3d(): header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary') w = _wcs.Wcsprm(header) assert w.cd.shape == (3, 3) assert w.get_pc().shape == (3, 3) assert w.get_cdelt().shape == (3,) def test_get_pc(): header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary') w = _wcs.Wcsprm(header) pc = w.get_pc() try: pc[0, 0] = 42 except (RuntimeError, ValueError): pass else: raise AssertionError() @raises(_wcs.SingularMatrixError) def test_detailed_err(): w = _wcs.Wcsprm() w.pc = [[0, 0], [0, 0]] w.set() def test_header_parse(): from ...io import fits with get_pkg_data_fileobj( 'data/header_newlines.fits', encoding='binary') as test_file: hdulist = fits.open(test_file) w = wcs.WCS(hdulist[0].header) assert w.wcs.ctype[0] == 'RA---TAN-SIP' def test_locale(): orig_locale = locale.getlocale(locale.LC_NUMERIC)[0] try: locale.setlocale(locale.LC_NUMERIC, 'fr_FR') except locale.Error: pytest.xfail( "Can't set to 'fr_FR' locale, perhaps because it is not installed " "on this system") try: header = get_pkg_data_contents('data/locale.hdr', encoding='binary') w = _wcs.Wcsprm(header) assert re.search("[0-9]+,[0-9]*", w.to_header()) is None finally: if orig_locale is None: # reset to the default setting locale.resetlocale(locale.LC_NUMERIC) else: # restore to whatever the previous value had been set to for # whatever reason locale.setlocale(locale.LC_NUMERIC, orig_locale) @raises(UnicodeEncodeError) def test_unicode(): w = _wcs.Wcsprm() w.alt = "‰" def test_sub_segfault(): # Issue #1960 header = fits.Header.fromtextfile( get_pkg_data_filename('data/sub-segfault.hdr')) w = wcs.WCS(header) sub = w.sub([wcs.WCSSUB_CELESTIAL]) gc.collect() def test_bounds_check(): w = _wcs.Wcsprm() w.bounds_check(False) def test_wcs_sub_error_message(): # Issue #1587 w = _wcs.Wcsprm() with pytest.raises(TypeError) as e: w.sub('latitude') assert str(e).endswith("axes must None, a sequence or an integer") def test_wcs_sub(): # Issue #3356 w = _wcs.Wcsprm() w.sub(['latitude']) w = _wcs.Wcsprm() w.sub([b'latitude']) def test_compare(): header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary') w = _wcs.Wcsprm(header) w2 = _wcs.Wcsprm(header) assert w == w2 w.equinox = 42 assert w == w2 assert not w.compare(w2) assert w.compare(w2, _wcs.WCSCOMPARE_ANCILLARY) w = _wcs.Wcsprm(header) w2 = _wcs.Wcsprm(header) w.cdelt[0] = np.float32(0.00416666666666666666666666) w2.cdelt[0] = np.float64(0.00416666666666666666666666) assert not w.compare(w2) assert w.compare(w2, tolerance=1e-6) def test_radesys_defaults(): 
w = _wcs.Wcsprm() w.ctype = ['RA---TAN', 'DEC--TAN'] w.set() assert w.radesys == "ICRS" def test_radesys_defaults_full(): # As described in Section 3.1 of the FITS standard "Equatorial and ecliptic # coordinates", for those systems the RADESYS keyword can be used to # indicate the equatorial/ecliptic frame to use. From the standard: # "For RADESYSa values of FK4 and FK4-NO-E, any stated equinox is Besselian # and, if neither EQUINOXa nor EPOCH are given, a default of 1950.0 is to # be taken. For FK5, any stated equinox is Julian and, if neither keyword # is given, it defaults to 2000.0. # "If the EQUINOXa keyword is given it should always be accompanied by # RADESYS a. However, if it should happen to ap- pear by itself then # RADESYSa defaults to FK4 if EQUINOXa < 1984.0, or to FK5 if EQUINOXa # 1984.0. Note that these defaults, while probably true of older files # using the EPOCH keyword, are not required of them. # By default RADESYS is empty w = _wcs.Wcsprm(naxis=2) assert w.radesys == '' assert np.isnan(w.equinox) # For non-ecliptic or equatorial systems it is still empty w = _wcs.Wcsprm(naxis=2) for ctype in [('GLON-CAR', 'GLAT-CAR'), ('SLON-SIN', 'SLAT-SIN')]: w.ctype = ctype w.set() assert w.radesys == '' assert np.isnan(w.equinox) for ctype in [('RA---TAN', 'DEC--TAN'), ('ELON-TAN', 'ELAT-TAN'), ('DEC--TAN', 'RA---TAN'), ('ELAT-TAN', 'ELON-TAN')]: # Check defaults for RADESYS w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.set() assert w.radesys == 'ICRS' w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.equinox = 1980 w.set() assert w.radesys == 'FK4' w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.equinox = 1984 w.set() assert w.radesys == 'FK5' w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.radesys = 'foo' w.set() assert w.radesys == 'foo' # Check defaults for EQUINOX w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.set() assert np.isnan(w.equinox) # frame is ICRS, no equinox w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.radesys = 'ICRS' w.set() assert np.isnan(w.equinox) w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.radesys = 'FK5' w.set() assert w.equinox == 2000. w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.radesys = 'FK4' w.set() assert w.equinox == 1950 w = _wcs.Wcsprm(naxis=2) w.ctype = ctype w.radesys = 'FK4-NO-E' w.set() assert w.equinox == 1950 def test_iteration(): world = np.array( [[-0.58995335, -0.5], [0.00664326, -0.5], [-0.58995335, -0.25], [0.00664326, -0.25], [-0.58995335, 0.], [0.00664326, 0.], [-0.58995335, 0.25], [0.00664326, 0.25], [-0.58995335, 0.5], [0.00664326, 0.5]], float ) w = wcs.WCS() w.wcs.ctype = ['GLON-CAR', 'GLAT-CAR'] w.wcs.cdelt = [-0.006666666828, 0.006666666828] w.wcs.crpix = [75.907, 74.8485] x = w.wcs_world2pix(world, 1) expected = np.array( [[1.64400000e+02, -1.51498185e-01], [7.49105110e+01, -1.51498185e-01], [1.64400000e+02, 3.73485009e+01], [7.49105110e+01, 3.73485009e+01], [1.64400000e+02, 7.48485000e+01], [7.49105110e+01, 7.48485000e+01], [1.64400000e+02, 1.12348499e+02], [7.49105110e+01, 1.12348499e+02], [1.64400000e+02, 1.49848498e+02], [7.49105110e+01, 1.49848498e+02]], float) assert_array_almost_equal(x, expected) w2 = w.wcs_pix2world(x, 1) world[:, 0] %= 360. 
    assert_array_almost_equal(w2, world)


def test_invalid_args():
    with pytest.raises(TypeError):
        w = _wcs.Wcsprm(keysel='A')

    with pytest.raises(ValueError):
        w = _wcs.Wcsprm(keysel=2)

    with pytest.raises(ValueError):
        w = _wcs.Wcsprm(colsel=2)

    with pytest.raises(ValueError):
        w = _wcs.Wcsprm(naxis=64)

    header = get_pkg_data_contents(
        'spectra/orion-velo-1.hdr', encoding='binary')

    with pytest.raises(ValueError):
        w = _wcs.Wcsprm(header, relax='FOO')

    with pytest.raises(ValueError):
        w = _wcs.Wcsprm(header, naxis=3)

    with pytest.raises(KeyError):
        w = _wcs.Wcsprm(header, key='A')
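
# Editorial sketch, not part of the original test module: the defaulting
# rules exercised in test_radesys_defaults_full above, condensed.  For an
# equatorial Wcsprm with no explicit RADESYS, the frame is inferred from
# EQUINOX (< 1984.0 -> FK4, otherwise FK5), and is ICRS when neither
# keyword is given.
def _radesys_default_example(equinox=None):
    w = _wcs.Wcsprm(naxis=2)
    w.ctype = ['RA---TAN', 'DEC--TAN']
    if equinox is not None:
        w.equinox = equinox
    w.set()
    return w.radesys     # 'ICRS', 'FK4' or 'FK5'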
4ddded1e59c261c4dd31caa039aaf17b52a17823830f4ac61d470a60bc697ffb
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import os
import sys

if __name__ == '__main__':
    astropy_path = sys.argv[-1]
    sys.argv = sys.argv[:-1]
    sys.path.insert(0, astropy_path)

    from astropy import wcs
    import numpy as np
    from distutils.core import setup, Extension

    if sys.platform == 'win32':
        # These are written into wcsconfig.h, but that file is not
        # used by all parts of wcslib.
        define_macros = [
            ('YY_NO_UNISTD_H', None),
            ('_CRT_SECURE_NO_WARNINGS', None),
            ('_NO_OLDNAMES', None),  # for mingw32
            ('NO_OLDNAMES', None),   # for mingw64
            ('__STDC__', None)       # for MSVC
        ]
    else:
        define_macros = []

    try:
        numpy_include = np.get_include()
    except AttributeError:
        numpy_include = np.get_numpy_include()

    wcsapi_test_module = Extension(
        str('wcsapi_test'),
        include_dirs=[
            numpy_include,
            os.path.join(wcs.get_include(), 'astropy_wcs'),
            os.path.join(wcs.get_include(), 'wcslib')
        ],
        # Use the *full* name to the c file, since we can't change the cwd
        # during testing
        sources=[str(os.path.join(os.path.dirname(__file__), 'wcsapi_test.c'))],
        define_macros=define_macros)

    setup(
        name='wcsapi_test',
        ext_modules=[wcsapi_test_module])
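
# Note (editorial, not in the original file): this setup script is not run
# directly; test_wcsapi_extension in test_extension.py invokes it as
#   python setup.py install --install-lib=<tmpdir> <path-to-astropy>
# which is why the astropy path is popped off sys.argv at the top.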
6c6417a3818fcb3b8109394eea76ff8bfceaa8fff36b42b83efa0bc00999876d
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import os
import subprocess
import sys

import pytest


def test_wcsapi_extension(tmpdir):
    # Test that we can build a simple C extension with the astropy.wcs C API

    setup_path = os.path.dirname(__file__)
    astropy_path = os.path.abspath(
        os.path.join(setup_path, '..', '..', '..', '..'))

    env = os.environ.copy()
    paths = [str(tmpdir), astropy_path]
    if env.get('PYTHONPATH'):
        paths.append(env.get('PYTHONPATH'))
    env[str('PYTHONPATH')] = str(os.pathsep.join(paths))

    # Build the extension
    # This used to use subprocess.check_call, but on Python 3.4 there was
    # a mysterious Heisenbug causing this to fail with a non-zero exit code
    # *unless* the output is redirected.  This bug also did not occur in an
    # interactive session, so it likely had something to do with pytest's
    # output capture
    p = subprocess.Popen([sys.executable, 'setup.py', 'install',
                          '--install-lib={0}'.format(tmpdir),
                          astropy_path],
                         cwd=setup_path, env=env,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Whether the process fails or not this isn't likely to produce a great
    # deal of output so communicate should be fine in almost all cases
    stdout, stderr = p.communicate()

    try:
        stdout, stderr = stdout.decode('utf8'), stderr.decode('utf8')
    except UnicodeDecodeError:
        # Don't try to guess about encoding; just display the text
        stdout, stderr = stdout.decode('latin1'), stderr.decode('latin1')

    # If compilation fails, we can skip this test, since the
    # dependencies necessary to compile an extension may be missing.
    # If it passes, however, we want to continue and ensure that the
    # extension created is actually usable.  However, if we're on
    # Travis-CI, or another generic continuous integration setup, we
    # don't want to ever skip, because having it fail in that
    # environment probably indicates something more serious that we
    # want to know about.
    if (not (str('CI') in os.environ or
             str('TRAVIS') in os.environ or
             str('CONTINUOUS_INTEGRATION') in os.environ) and
            p.returncode):
        pytest.skip("system unable to compile extensions")
        return

    assert p.returncode == 0, (
        "setup.py exited with non-zero return code {0}\n"
        "stdout:\n\n{1}\n\nstderr:\n\n{2}\n".format(
            p.returncode, stdout, stderr))

    code = """
    import sys
    import wcsapi_test
    sys.exit(wcsapi_test.test())
    """

    code = code.strip().replace('\n', '; ')

    # Import and run the extension
    subprocess.check_call([sys.executable, '-c', code], env=env)
d069df7490a2a962b063499c3954ac12d09311d8e67a1ed501694546e6a79239
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import itertools import numpy as np import operator import pytest from .. import (Time, TimeDelta, OperandTypeError, ScaleValueError, TIME_SCALES, TIME_DELTA_SCALES) from ... import units as u allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0) allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52) # 20 ps atol allclose_sec = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52 * 24 * 3600) # 20 ps atol class TestTimeDelta(): """Test TimeDelta class""" def setup(self): self.t = Time('2010-01-01', scale='utc') self.t2 = Time('2010-01-02 00:00:01', scale='utc') self.t3 = Time('2010-01-03 01:02:03', scale='utc', precision=9, in_subfmt='date_hms', out_subfmt='date_hm', location=(-75.*u.degree, 30.*u.degree, 500*u.m)) self.dt = TimeDelta(100.0, format='sec') self.dt_array = TimeDelta(np.arange(100, 1000, 100), format='sec') def test_sub(self): # time - time dt = self.t2 - self.t assert (repr(dt).startswith("<TimeDelta object: scale='tai' " "format='jd' value=1.00001157407")) assert allclose_jd(dt.jd, 86401.0 / 86400.0) assert allclose_sec(dt.sec, 86401.0) # time - delta_time t = self.t2 - dt assert t.iso == self.t.iso # delta_time - delta_time dt2 = dt - self.dt assert allclose_sec(dt2.sec, 86301.0) # delta_time - time with pytest.raises(OperandTypeError): dt - self.t def test_add(self): # time + time with pytest.raises(OperandTypeError): self.t2 + self.t # time + delta_time dt = self.t2 - self.t t2 = self.t + dt assert t2.iso == self.t2.iso # delta_time + delta_time dt2 = dt + self.dt assert allclose_sec(dt2.sec, 86501.0) # delta_time + time dt = self.t2 - self.t t2 = dt + self.t assert t2.iso == self.t2.iso def test_add_vector(self): """Check time arithmetic as well as properly keeping track of whether a time is a scalar or a vector""" t = Time(0.0, format='mjd', scale='utc') t2 = Time([0.0, 1.0], format='mjd', scale='utc') dt = TimeDelta(100.0, format='jd') dt2 = TimeDelta([100.0, 200.0], format='jd') out = t + dt assert allclose_jd(out.mjd, 100.0) assert out.isscalar out = t + dt2 assert allclose_jd(out.mjd, [100.0, 200.0]) assert not out.isscalar out = t2 + dt assert allclose_jd(out.mjd, [100.0, 101.0]) assert not out.isscalar out = dt + dt assert allclose_jd(out.jd, 200.0) assert out.isscalar out = dt + dt2 assert allclose_jd(out.jd, [200.0, 300.0]) assert not out.isscalar # Reverse the argument order out = dt + t assert allclose_jd(out.mjd, 100.0) assert out.isscalar out = dt2 + t assert allclose_jd(out.mjd, [100.0, 200.0]) assert not out.isscalar out = dt + t2 assert allclose_jd(out.mjd, [100.0, 101.0]) assert not out.isscalar out = dt2 + dt assert allclose_jd(out.jd, [200.0, 300.0]) assert not out.isscalar def test_sub_vector(self): """Check time arithmetic as well as properly keeping track of whether a time is a scalar or a vector""" t = Time(0.0, format='mjd', scale='utc') t2 = Time([0.0, 1.0], format='mjd', scale='utc') dt = TimeDelta(100.0, format='jd') dt2 = TimeDelta([100.0, 200.0], format='jd') out = t - dt assert allclose_jd(out.mjd, -100.0) assert out.isscalar out = t - dt2 assert allclose_jd(out.mjd, [-100.0, -200.0]) assert not out.isscalar out = t2 - dt assert allclose_jd(out.mjd, [-100.0, -99.0]) assert not out.isscalar out = dt - dt assert allclose_jd(out.jd, 0.0) assert out.isscalar out = dt - dt2 assert allclose_jd(out.jd, [0.0, -100.0]) assert not out.isscalar @pytest.mark.parametrize('values', [(2455197.5, 2455198.5), ([2455197.5], 
[2455198.5])]) def test_copy_timedelta(self, values): """Test copying the values of a TimeDelta object by passing it into the Time initializer. """ val1, val2 = values t = Time(val1, format='jd', scale='utc') t2 = Time(val2, format='jd', scale='utc') dt = t2 - t dt2 = TimeDelta(dt, copy=False) assert np.all(dt.jd == dt2.jd) assert dt._time.jd1 is dt2._time.jd1 assert dt._time.jd2 is dt2._time.jd2 dt2 = TimeDelta(dt, copy=True) assert np.all(dt.jd == dt2.jd) assert dt._time.jd1 is not dt2._time.jd1 assert dt._time.jd2 is not dt2._time.jd2 # Include initializers dt2 = TimeDelta(dt, format='sec') assert allclose_sec(dt2.value, 86400.0) def test_neg_abs(self): for dt in (self.dt, self.dt_array): dt2 = -dt assert np.all(dt2.jd == -dt.jd) dt3 = abs(dt) assert np.all(dt3.jd == dt.jd) dt4 = abs(dt2) assert np.all(dt4.jd == dt.jd) def test_mul_div(self): for dt in (self.dt, self.dt_array): dt2 = dt + dt + dt dt3 = 3. * dt assert allclose_jd(dt2.jd, dt3.jd) dt4 = dt3 / 3. assert allclose_jd(dt4.jd, dt.jd) dt5 = self.dt * np.arange(3) assert dt5[0].jd == 0. assert dt5[-1].jd == (self.dt + self.dt).jd with pytest.raises(OperandTypeError): self.dt * self.dt with pytest.raises(OperandTypeError): self.dt * self.t def test_keep_properties(self): # closes #1924 (partially) dt = TimeDelta(1000., format='sec') for t in (self.t, self.t3): ta = t + dt assert ta.location is t.location assert ta.precision == t.precision assert ta.in_subfmt == t.in_subfmt assert ta.out_subfmt == t.out_subfmt tr = dt + t assert tr.location is t.location assert tr.precision == t.precision assert tr.in_subfmt == t.in_subfmt assert tr.out_subfmt == t.out_subfmt ts = t - dt assert ts.location is t.location assert ts.precision == t.precision assert ts.in_subfmt == t.in_subfmt assert ts.out_subfmt == t.out_subfmt t_tdb = self.t.tdb assert hasattr(t_tdb, '_delta_tdb_tt') assert not hasattr(t_tdb, '_delta_ut1_utc') t_tdb_ut1 = t_tdb.ut1 assert hasattr(t_tdb_ut1, '_delta_tdb_tt') assert hasattr(t_tdb_ut1, '_delta_ut1_utc') t_tdb_ut1_utc = t_tdb_ut1.utc assert hasattr(t_tdb_ut1_utc, '_delta_tdb_tt') assert hasattr(t_tdb_ut1_utc, '_delta_ut1_utc') # adding or subtracting some time should remove the delta's # since these are time-dependent and should be recalculated for op in (operator.add, operator.sub): t1 = op(t_tdb, dt) assert not hasattr(t1, '_delta_tdb_tt') assert not hasattr(t1, '_delta_ut1_utc') t2 = op(t_tdb_ut1, dt) assert not hasattr(t2, '_delta_tdb_tt') assert not hasattr(t2, '_delta_ut1_utc') t3 = op(t_tdb_ut1_utc, dt) assert not hasattr(t3, '_delta_tdb_tt') assert not hasattr(t3, '_delta_ut1_utc') def test_set_format(self): """ Test basics of setting format attribute. """ dt = TimeDelta(86400.0, format='sec') assert dt.value == 86400.0 assert dt.format == 'sec' dt.format = 'jd' assert dt.value == 1.0 assert dt.format == 'jd' class TestTimeDeltaScales(): """Test scale conversion for Time Delta. 
Go through @taldcroft's list of expected behaviour from #1932""" def setup(self): # pick a date that includes a leap second for better testing self.iso_times = ['2012-06-30 12:00:00', '2012-06-30 23:59:59', '2012-07-01 00:00:00', '2012-07-01 12:00:00'] self.t = dict((scale, Time(self.iso_times, scale=scale, precision=9)) for scale in TIME_SCALES) self.dt = dict((scale, self.t[scale]-self.t[scale][0]) for scale in TIME_SCALES) def test_delta_scales_definition(self): for scale in list(TIME_DELTA_SCALES) + [None]: TimeDelta([0., 1., 10.], format='sec', scale=scale) with pytest.raises(ScaleValueError): TimeDelta([0., 1., 10.], format='sec', scale='utc') @pytest.mark.parametrize(('scale1', 'scale2'), list(itertools.product(TIME_SCALES, TIME_SCALES))) def test_scales_for_time_minus_time(self, scale1, scale2): """T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X) and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X I.e., time differences of two times should have the scale of the first time. The one exception is UTC, which returns TAI. There are no timescales for which this does not work. """ t1 = self.t[scale1] t2 = self.t[scale2] dt = t1 - t2 if scale1 in TIME_DELTA_SCALES: assert dt.scale == scale1 else: assert scale1 == 'utc' assert dt.scale == 'tai' # now check with delta time; also check reversibility t1_recover_t2_scale = t2 + dt assert t1_recover_t2_scale.scale == scale2 t1_recover = getattr(t1_recover_t2_scale, scale1) assert allclose_jd(t1_recover.jd, t1.jd) t2_recover_t1_scale = t1 - dt assert t2_recover_t1_scale.scale == scale1 t2_recover = getattr(t2_recover_t1_scale, scale2) assert allclose_jd(t2_recover.jd, t2.jd) def test_scales_for_delta_minus_delta(self): """dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X I.e. this will succeed if dT(Y) can be converted to scale X. Returns delta time in scale X """ # geocentric timescales dt_tai = self.dt['tai'] dt_tt = self.dt['tt'] dt0 = dt_tai - dt_tt assert dt0.scale == 'tai' # tai and tt have the same scale, so differences should be the same assert allclose_sec(dt0.sec, 0.) dt_tcg = self.dt['tcg'] dt1 = dt_tai - dt_tcg assert dt1.scale == 'tai' # tai and tcg do not have the same scale, so differences different assert not allclose_sec(dt1.sec, 0.) t_tai_tcg = self.t['tai'].tcg dt_tai_tcg = t_tai_tcg - t_tai_tcg[0] dt2 = dt_tai - dt_tai_tcg assert dt2.scale == 'tai' # but if tcg difference calculated from tai, it should roundtrip assert allclose_sec(dt2.sec, 0.) # check that if we put TCG first, we get a TCG scale back dt3 = dt_tai_tcg - dt_tai assert dt3.scale == 'tcg' assert allclose_sec(dt3.sec, 0.) for scale in 'tdb', 'tcb', 'ut1': with pytest.raises(TypeError): dt_tai - self.dt[scale] # barycentric timescales dt_tcb = self.dt['tcb'] dt_tdb = self.dt['tdb'] dt4 = dt_tcb - dt_tdb assert dt4.scale == 'tcb' assert not allclose_sec(dt1.sec, 0.) t_tcb_tdb = self.t['tcb'].tdb dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0] dt5 = dt_tcb - dt_tcb_tdb assert dt5.scale == 'tcb' assert allclose_sec(dt5.sec, 0.) for scale in 'utc', 'tai', 'tt', 'tcg', 'ut1': with pytest.raises(TypeError): dt_tcb - self.dt[scale] # rotational timescale dt_ut1 = self.dt['ut1'] dt5 = dt_ut1 - dt_ut1[-1] assert dt5.scale == 'ut1' assert dt5[-1].sec == 0. 
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb': with pytest.raises(TypeError): dt_ut1 - self.dt[scale] @pytest.mark.parametrize( ('scale', 'op'), list(itertools.product(TIME_SCALES, (operator.add, operator.sub)))) def test_scales_for_delta_scale_is_none(self, scale, op): """T(X) +/- dT(None) or T(X) +/- Quantity(time-like) This is always allowed and just adds JDs, i.e., the scale of the TimeDelta or time-like Quantity will be taken to be X. The one exception is again for X=UTC, where TAI is assumed instead, so that a day is always defined as 86400 seconds. """ dt_none = TimeDelta([0., 1., -1., 1000.], format='sec') assert dt_none.scale is None q_time = dt_none.to('s') dt = self.dt[scale] dt1 = op(dt, dt_none) assert dt1.scale == dt.scale assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd)) dt2 = op(dt_none, dt) assert dt2.scale == dt.scale assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd)) dt3 = op(q_time, dt) assert dt3.scale == dt.scale assert allclose_jd(dt3.jd, dt2.jd) t = self.t[scale] t1 = op(t, dt_none) assert t1.scale == t.scale assert allclose_jd(t1.jd, op(t.jd, dt_none.jd)) if op is operator.add: t2 = op(dt_none, t) assert t2.scale == t.scale assert allclose_jd(t2.jd, t1.jd) t3 = op(t, q_time) assert t3.scale == t.scale assert allclose_jd(t3.jd, t1.jd) @pytest.mark.parametrize('scale', TIME_SCALES) def test_delta_day_is_86400_seconds(self, scale): """TimeDelta or Quantity holding 1 day always means 24*60*60 seconds This holds true for all timescales but UTC, for which leap-second days are longer or shorter by one second. """ t = self.t[scale] dt_day = TimeDelta(1., format='jd') q_day = dt_day.to('day') dt_day_leap = t[-1] - t[0] # ^ = exclusive or, so either equal and not UTC, or not equal and UTC assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == 'utc') t1 = t[0] + dt_day assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == 'utc') t2 = q_day + t[0] assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == 'utc') t3 = t[-1] - dt_day assert allclose_jd(t3.jd, t[0].jd) ^ (scale == 'utc') t4 = t[-1] - q_day assert allclose_jd(t4.jd, t[0].jd) ^ (scale == 'utc') def test_timedelta_setitem(): t = TimeDelta([1, 2, 3] * u.d, format='jd') t[0] = 0.5 assert allclose_jd(t.value, [0.5, 2, 3]) t[1:] = 4.5 assert allclose_jd(t.value, [0.5, 4.5, 4.5]) t[:] = 86400 * u.s assert allclose_jd(t.value, [1, 1, 1]) t[1] = TimeDelta(2, format='jd') assert allclose_jd(t.value, [1, 2, 1]) with pytest.raises(ValueError) as err: t[1] = 1 * u.m assert 'cannot convert value to a compatible TimeDelta' in str(err) def test_timedelta_mask(): t = TimeDelta([1, 2] * u.d, format='jd') t[1] = np.ma.masked assert np.all(t.mask == [False, True]) assert allclose_jd(t[0].value, 1) assert t.value[1] is np.ma.masked
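
# Editorial sketch, not part of the original test module: the scale rules
# tested above mean a TimeDelta created without a scale has scale=None and
# simply adds its JD value to whichever Time it is combined with; one day
# is therefore exactly 86400 s on any scale except UTC (leap seconds).
def _timedelta_arithmetic_example():
    t0 = Time('2012-06-30 12:00:00', scale='tai')
    one_day = TimeDelta(1.0, format='jd')   # scale is None
    t1 = t0 + one_day
    return (t1 - t0).sec                    # 86400.0 on the TAI scale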
a2f201beec18e23c0922bea391b8fe3521a955e9c10d3cbbd93ce331f273e855
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import functools import datetime from copy import deepcopy import numpy as np from ...tests.helper import catch_warnings, pytest from ...utils import isiterable from .. import Time, ScaleValueError, TIME_SCALES, TimeString, TimezoneInfo from ...coordinates import EarthLocation from ... import units as u from ... import _erfa as erfa from ...table import Column try: import pytz HAS_PYTZ = True except ImportError: HAS_PYTZ = False allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0) allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52) # 20 ps atol allclose_sec = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52 * 24 * 3600) # 20 ps atol allclose_year = functools.partial(np.allclose, rtol=2. ** -52, atol=0.) # 14 microsec at current epoch def setup_function(func): func.FORMATS_ORIG = deepcopy(Time.FORMATS) def teardown_function(func): Time.FORMATS.clear() Time.FORMATS.update(func.FORMATS_ORIG) class TestBasic(): """Basic tests stemming from initial example and API reference""" def test_simple(self): times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'] t = Time(times, format='iso', scale='utc') assert (repr(t) == "<Time object: scale='utc' format='iso' " "value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>") assert allclose_jd(t.jd1, np.array([2451180., 2455198.])) assert allclose_jd2(t.jd2, np.array([-0.5+1.4288980208333335e-06, -0.50000000e+00])) # Set scale to TAI t = t.tai assert (repr(t) == "<Time object: scale='tai' format='iso' " "value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>") assert allclose_jd(t.jd1, np.array([2451180., 2455198.])) assert allclose_jd2(t.jd2, np.array([-0.5+0.00037179926839122024, -0.5+0.00039351851851851852])) # Get a new ``Time`` object which is referenced to the TT scale # (internal JD1 and JD1 are now with respect to TT scale)""" assert (repr(t.tt) == "<Time object: scale='tt' format='iso' " "value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>") # Get the representation of the ``Time`` object in a particular format # (in this case seconds since 1998.0). This returns either a scalar or # array, depending on whether the input was a scalar or array""" assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002])) def test_different_dimensions(self): """Test scalars, vector, and higher-dimensions""" # scalar val, val1 = 2450000.0, 0.125 t1 = Time(val, val1, format='jd') assert t1.isscalar is True and t1.shape == () # vector val = np.arange(2450000., 2450010.) t2 = Time(val, format='jd') assert t2.isscalar is False and t2.shape == val.shape # explicitly check broadcasting for mixed vector, scalar. val2 = 0. t3 = Time(val, val2, format='jd') assert t3.isscalar is False and t3.shape == val.shape val2 = (np.arange(5.)/10.).reshape(5, 1) # now see if broadcasting to two-dimensional works t4 = Time(val, val2, format='jd') assert t4.isscalar is False assert t4.shape == np.broadcast(val, val2).shape @pytest.mark.parametrize('value', [2455197.5, [2455197.5]]) def test_copy_time(self, value): """Test copying the values of a Time object by passing it into the Time initializer. 
""" t = Time(value, format='jd', scale='utc') t2 = Time(t, copy=False) assert np.all(t.jd - t2.jd == 0) assert np.all((t - t2).jd == 0) assert t._time.jd1 is t2._time.jd1 assert t._time.jd2 is t2._time.jd2 t2 = Time(t, copy=True) assert np.all(t.jd - t2.jd == 0) assert np.all((t - t2).jd == 0) assert t._time.jd1 is not t2._time.jd1 assert t._time.jd2 is not t2._time.jd2 # Include initializers t2 = Time(t, format='iso', scale='tai', precision=1) assert t2.value == '2010-01-01 00:00:34.0' t2 = Time(t, format='iso', scale='tai', out_subfmt='date') assert t2.value == '2010-01-01' def test_getitem(self): """Test that Time objects holding arrays are properly subscriptable, set isscalar as appropriate, and also subscript delta_ut1_utc, etc.""" mjd = np.arange(50000, 50010) t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d')) t1 = t[3] assert t1.isscalar is True assert t1._time.jd1 == t._time.jd1[3] assert t1.location is t.location t1a = Time(mjd[3], format='mjd', scale='utc') assert t1a.isscalar is True assert np.all(t1._time.jd1 == t1a._time.jd1) t1b = Time(t[3]) assert t1b.isscalar is True assert np.all(t1._time.jd1 == t1b._time.jd1) t2 = t[4:6] assert t2.isscalar is False assert np.all(t2._time.jd1 == t._time.jd1[4:6]) assert t2.location is t.location t2a = Time(t[4:6]) assert t2a.isscalar is False assert np.all(t2a._time.jd1 == t._time.jd1[4:6]) t2b = Time([t[4], t[5]]) assert t2b.isscalar is False assert np.all(t2b._time.jd1 == t._time.jd1[4:6]) t2c = Time((t[4], t[5])) assert t2c.isscalar is False assert np.all(t2c._time.jd1 == t._time.jd1[4:6]) t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb) t3 = t[4:6] assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6]) t4 = Time(mjd, format='mjd', scale='utc', location=(np.arange(len(mjd)), np.arange(len(mjd)))) t5 = t4[3] assert t5.location == t4.location[3] t6 = t4[4:6] assert np.all(t6.location == t4.location[4:6]) # check it is a view # (via ndarray, since quantity setter problematic for structured array) allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype) assert t6.location.view(np.ndarray)[-1] != allzeros assert t4.location.view(np.ndarray)[5] != allzeros t6.location.view(np.ndarray)[-1] = allzeros assert t4.location.view(np.ndarray)[5] == allzeros # Test subscription also works for two-dimensional arrays. frac = np.arange(0., 0.999, 0.2) t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc', location=('45d', '50d')) assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0] assert t7[0, 0].isscalar is True assert np.all(t7[5]._time.jd1 == t7._time.jd1[5]) assert np.all(t7[5]._time.jd2 == t7._time.jd2[5]) assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2]) assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2]) assert np.all(t7[:, 0]._time.jd1 == t._time.jd1) assert np.all(t7[:, 0]._time.jd2 == t._time.jd2) # Get tdb to check that delta_tdb_tt attribute is sliced properly. t7_tdb = t7.tdb assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0] assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5]) assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2]) # Explicitly set delta_tdb_tt attribute. Now it should not be sliced. t7.delta_tdb_tt = 0.1 t7_tdb2 = t7.tdb assert t7_tdb2[0, 0].delta_tdb_tt == 0.1 assert t7_tdb2[5].delta_tdb_tt == 0.1 assert t7_tdb2[:, 2].delta_tdb_tt == 0.1 # Check broadcasting of location. 
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc', location=(np.arange(len(frac)), np.arange(len(frac)))) assert t8[0, 0].location == t8.location[0, 0] assert np.all(t8[5].location == t8.location[5]) assert np.all(t8[:, 2].location == t8.location[:, 2]) # Finally check empty array. t9 = t[:0] assert t9.isscalar is False assert t9.shape == (0,) assert t9.size == 0 def test_properties(self): """Use properties to convert scales and formats. Note that the UT1 to UTC transformation requires a supplementary value (``delta_ut1_utc``) that can be obtained by interpolating from a table supplied by IERS. This is tested separately.""" t = Time('2010-01-01 00:00:00', format='iso', scale='utc') t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform assert allclose_jd(t.jd, 2455197.5) assert t.iso == '2010-01-01 00:00:00.000' assert t.tt.iso == '2010-01-01 00:01:06.184' assert t.tai.fits == '2010-01-01T00:00:34.000(TAI)' assert allclose_jd(t.utc.jd, 2455197.5) assert allclose_jd(t.ut1.jd, 2455197.500003867) assert t.tcg.isot == '2010-01-01T00:01:06.910' assert allclose_sec(t.unix, 1262304000.0) assert allclose_sec(t.cxcsec, 378691266.184) assert allclose_sec(t.gps, 946339215.0) assert t.datetime == datetime.datetime(2010, 1, 1) def test_precision(self): """Set the output precision which is used for some formats. This is also a test of the code that provides a dict for global and instance options.""" t = Time('2010-01-01 00:00:00', format='iso', scale='utc') # Uses initial class-defined precision=3 assert t.iso == '2010-01-01 00:00:00.000' # Set instance precision to 9 t.precision = 9 assert t.iso == '2010-01-01 00:00:00.000000000' assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000' def test_transforms(self): """Transform from UTC to all supported time scales (TAI, TCB, TCG, TDB, TT, UT1, UTC). This requires auxiliary information (latitude and longitude).""" lat = 19.48125 lon = -155.933222 t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc', precision=6, location=(lon, lat)) t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform assert t.utc.iso == '2006-01-15 21:24:37.500000' assert t.ut1.iso == '2006-01-15 21:24:37.834100' assert t.tai.iso == '2006-01-15 21:25:10.500000' assert t.tt.iso == '2006-01-15 21:25:42.684000' assert t.tcg.iso == '2006-01-15 21:25:43.322690' assert t.tdb.iso == '2006-01-15 21:25:42.684373' assert t.tcb.iso == '2006-01-15 21:25:56.893952' def test_location(self): """Check that location creates an EarthLocation object, and that such objects can be used as arguments. """ lat = 19.48125 lon = -155.933222 t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc', precision=6, location=(lon, lat)) assert isinstance(t.location, EarthLocation) location = EarthLocation(lon, lat) t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc', precision=6, location=location) assert isinstance(t2.location, EarthLocation) assert t2.location == t.location t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc', precision=6, location=(location.x, location.y, location.z)) assert isinstance(t3.location, EarthLocation) assert t3.location == t.location def test_location_array(self): """Check that location arrays are checked for size and used for the corresponding times. Also checks that erfa can handle array-valued locations, and can broadcast these if needed. 
""" lat = 19.48125 lon = -155.933222 t = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc', precision=6, location=(lon, lat)) assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000') assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373') t2 = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc', precision=6, location=(np.array([lon, 0]), np.array([lat, 0]))) assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000') assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373' assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373' with pytest.raises(ValueError): # 1 time, but two locations Time('2006-01-15 21:24:37.5', format='iso', scale='utc', precision=6, location=(np.array([lon, 0]), np.array([lat, 0]))) with pytest.raises(ValueError): # 3 times, but two locations Time(['2006-01-15 21:24:37.5']*3, format='iso', scale='utc', precision=6, location=(np.array([lon, 0]), np.array([lat, 0]))) # multidimensional mjd = np.arange(50000., 50008.).reshape(4, 2) t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat)) assert t3.shape == (4, 2) assert t3.location.shape == () assert t3.tdb.shape == t3.shape t4 = Time(mjd, format='mjd', scale='utc', location=(np.array([lon, 0]), np.array([lat, 0]))) assert t4.shape == (4, 2) assert t4.location.shape == t4.shape assert t4.tdb.shape == t4.shape t5 = Time(mjd, format='mjd', scale='utc', location=(np.array([[lon], [0], [0], [0]]), np.array([[lat], [0], [0], [0]]))) assert t5.shape == (4, 2) assert t5.location.shape == t5.shape assert t5.tdb.shape == t5.shape def test_all_transforms(self): """Test that all transforms work. Does not test correctness, except reversibility [#2074]""" lat = 19.48125 lon = -155.933222 for scale1 in TIME_SCALES: t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1, location=(lon, lat)) for scale2 in TIME_SCALES: t2 = getattr(t1, scale2) t21 = getattr(t2, scale1) assert allclose_jd(t21.jd, t1.jd) def test_creating_all_formats(self): """Create a time object using each defined format""" Time(2000.5, format='decimalyear') Time(100.0, format='cxcsec') Time(100.0, format='unix') Time(100.0, format='gps') Time(1950.0, format='byear', scale='tai') Time(2000.0, format='jyear', scale='tai') Time('B1950.0', format='byear_str', scale='tai') Time('J2000.0', format='jyear_str', scale='tai') Time('2000-01-01 12:23:34.0', format='iso', scale='tai') Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc') Time('2000-01-01T12:23:34.0', format='isot', scale='tai') Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc') Time('2000-01-01T12:23:34.0', format='fits') Time('2000-01-01T12:23:34.0', format='fits', scale='tdb') Time('2000-01-01T12:23:34.0(TDB)', format='fits') Time(2400000.5, 51544.0333981, format='jd', scale='tai') Time(0.0, 51544.0333981, format='mjd', scale='tai') Time('2000:001:12:23:34.0', format='yday', scale='tai') Time('2000:001:12:23:34.0Z', format='yday', scale='utc') dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456) Time(dt, format='datetime', scale='tai') Time([dt, dt], format='datetime', scale='tai') def test_datetime(self): """ Test datetime format, including guessing the format from the input type by not providing the format keyword to Time. 
""" dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456) dt2 = datetime.datetime(2001, 1, 1) t = Time(dt, scale='utc', precision=9) assert t.iso == '2000-01-02 03:04:05.123456000' assert t.datetime == dt assert t.value == dt t2 = Time(t.iso, scale='utc') assert t2.datetime == dt t = Time([dt, dt2], scale='utc') assert np.all(t.value == [dt, dt2]) t = Time('2000-01-01 01:01:01.123456789', scale='tai') assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457) # broadcasting dt3 = (dt + (dt2-dt)*np.arange(12)).reshape(4, 3) t3 = Time(dt3, scale='utc') assert t3.shape == (4, 3) assert t3[2, 1].value == dt3[2, 1] assert t3[2, 1] == Time(dt3[2, 1]) assert np.all(t3.value == dt3) assert np.all(t3[1].value == dt3[1]) assert np.all(t3[:, 2] == Time(dt3[:, 2])) assert Time(t3[2, 0]) == t3[2, 0] def test_epoch_transform(self): """Besselian and julian epoch transforms""" jd = 2457073.05631 t = Time(jd, format='jd', scale='tai', precision=6) assert allclose_year(t.byear, 2015.1365941020817) assert allclose_year(t.jyear, 2015.1349933196439) assert t.byear_str == 'B2015.136594' assert t.jyear_str == 'J2015.134993' t2 = Time(t.byear, format='byear', scale='tai') assert allclose_jd(t2.jd, jd) t2 = Time(t.jyear, format='jyear', scale='tai') assert allclose_jd(t2.jd, jd) t = Time('J2015.134993', scale='tai', precision=6) assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision assert t.byear_str == 'B2015.136594' def test_input_validation(self): """Wrong input type raises error""" times = [10, 20] with pytest.raises(ValueError): Time(times, format='iso', scale='utc') with pytest.raises(ValueError): Time('2000:001', format='jd', scale='utc') with pytest.raises(ValueError): Time([50000.0], ['bad'], format='mjd', scale='tai') with pytest.raises(ValueError): Time(50000.0, 'bad', format='mjd', scale='tai') with pytest.raises(ValueError): Time('2005-08-04T00:01:02.000Z', scale='tai') # regression test against #3396 with pytest.raises(ValueError): Time(np.nan, format='jd', scale='utc') with pytest.raises(ValueError): Time('2000-01-02T03:04:05(TAI)', scale='utc') with pytest.raises(ValueError): Time('2000-01-02T03:04:05(TAI') with pytest.raises(ValueError): Time('2000-01-02T03:04:05(UT(NIST)') def test_utc_leap_sec(self): """Time behaves properly near or in UTC leap second. 
This uses the 2012-06-30 leap second for testing.""" for year, month, day in ((2012, 6, 30), (2016, 12, 31)): # Start with a day without a leap second and note rollover yyyy_mm = '{:04d}-{:02d}'.format(year, month) yyyy_mm_dd = '{:04d}-{:02d}-{:02d}'.format(year, month, day) t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc') assert t1.iso == yyyy_mm + '-02 00:00:00.000' # Leap second is different t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc') assert t1.iso == yyyy_mm_dd + ' 23:59:59.900' t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc') assert t1.iso == yyyy_mm_dd + ' 23:59:60.000' t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc') assert t1.iso == yyyy_mm_dd + ' 23:59:60.999' if month == 6: yyyy_mm_dd_plus1 = '{:04d}-07-01'.format(year) else: yyyy_mm_dd_plus1 = '{:04d}-01-01'.format(year+1) t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc') assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000' # Delta time gives 2 seconds here as expected t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc') t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc') assert allclose_sec((t1 - t0).sec, 2.0) def test_init_from_time_objects(self): """Initialize from one or more Time objects""" t1 = Time('2007:001', scale='tai') t2 = Time(['2007-01-02', '2007-01-03'], scale='utc') # Init from a list of Time objects without an explicit scale t3 = Time([t1, t2]) # Test that init appropriately combines a scalar (t1) and list (t2) # and that scale and format are same as first element. assert len(t3) == 3 assert t3.scale == t1.scale assert t3.format == t1.format # t1 format is yday assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday])) # Init from a single Time object without a scale t3 = Time(t1) assert t3.isscalar assert t3.scale == t1.scale assert t3.format == t1.format assert np.all(t3.value == t1.value) # Init from a single Time object with scale specified t3 = Time(t1, scale='utc') assert t3.scale == 'utc' assert np.all(t3.value == t1.utc.value) # Init from a list of Time object with scale specified t3 = Time([t1, t2], scale='tt') assert t3.scale == 'tt' assert t3.format == t1.format # yday assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday])) # OK, how likely is this... but might as well test. mjd = np.arange(50000., 50006.) frac = np.arange(0., 0.999, 0.2) t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc') t5 = Time([t4[:2], t4[4:5]]) assert t5.shape == (3, 5) class TestVal2(): """Tests related to val2""" def test_val2_ignored(self): """Test that val2 is ignored for string input""" t = Time('2001:001', 'ignored', scale='utc') assert t.yday == '2001:001:00:00:00.000' def test_val2(self): """Various tests of the val2 input""" t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai') assert t.mjd[0] == t.mjd[1] assert t.jd[0] == t.jd[1] def test_val_broadcasts_against_val2(self): mjd = np.arange(50000., 50007.) 
frac = np.arange(0., 0.999, 0.2) t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc') assert t.shape == (7, 5) with pytest.raises(ValueError): Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai') class TestSubFormat(): """Test input and output subformat functionality""" def test_input_subformat(self): """Input subformat selection""" # Heterogeneous input formats with in_subfmt='*' (default) times = ['2000-01-01', '2000-01-01 01:01', '2000-01-01 01:01:01', '2000-01-01 01:01:01.123'] t = Time(times, format='iso', scale='tai') assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000', '2000-01-01 01:01:00.000', '2000-01-01 01:01:01.000', '2000-01-01 01:01:01.123'])) # Heterogeneous input formats with in_subfmt='date_*' times = ['2000-01-01 01:01', '2000-01-01 01:01:01', '2000-01-01 01:01:01.123'] t = Time(times, format='iso', scale='tai', in_subfmt='date_*') assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000', '2000-01-01 01:01:01.000', '2000-01-01 01:01:01.123'])) def test_input_subformat_fail(self): """Failed format matching""" with pytest.raises(ValueError): Time('2000-01-01 01:01', format='iso', scale='tai', in_subfmt='date') def test_bad_input_subformat(self): """Non-existent input subformat""" with pytest.raises(ValueError): Time('2000-01-01 01:01', format='iso', scale='tai', in_subfmt='doesnt exist') def test_output_subformat(self): """Input subformat selection""" # Heterogeneous input formats with in_subfmt='*' (default) times = ['2000-01-01', '2000-01-01 01:01', '2000-01-01 01:01:01', '2000-01-01 01:01:01.123'] t = Time(times, format='iso', scale='tai', out_subfmt='date_hm') assert np.all(t.iso == np.array(['2000-01-01 00:00', '2000-01-01 01:01', '2000-01-01 01:01', '2000-01-01 01:01'])) def test_fits_format(self): """FITS format includes bigger years.""" # Heterogeneous input formats with in_subfmt='*' (default) times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123'] t = Time(times, format='fits', scale='tai') assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000(TAI)', '2000-01-01T01:01:01.000(TAI)', '2000-01-01T01:01:01.123(TAI)'])) # Explicit long format for output, default scale is UTC. t2 = Time(times, format='fits', out_subfmt='long*') assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000(UTC)', '+02000-01-01T01:01:01.000(UTC)', '+02000-01-01T01:01:01.123(UTC)'])) # Implicit long format for output, because of negative year. times[2] = '-00594-01-01' t3 = Time(times, format='fits', scale='tai') assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000(TAI)', '+02000-01-01T01:01:01.000(TAI)', '-00594-01-01T00:00:00.000(TAI)'])) # Implicit long format for output, because of large positive year. times[2] = '+10594-01-01' t4 = Time(times, format='fits', scale='tai') assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000(TAI)', '+02000-01-01T01:01:01.000(TAI)', '+10594-01-01T00:00:00.000(TAI)'])) def test_yday_format(self): """Year:Day_of_year format""" # Heterogeneous input formats with in_subfmt='*' (default) times = ['2000-12-01', '2001-12-01 01:01:01.123'] t = Time(times, format='iso', scale='tai') t.out_subfmt = 'date_hm' assert np.all(t.yday == np.array(['2000:336:00:00', '2001:335:01:01'])) t.out_subfmt = '*' assert np.all(t.yday == np.array(['2000:336:00:00:00.000', '2001:335:01:01:01.123'])) def test_scale_input(self): """Test for issues related to scale input""" # Check case where required scale is defined by the TimeFormat. # All three should work. 
t = Time(100.0, format='cxcsec', scale='utc') assert t.scale == 'utc' t = Time(100.0, format='unix', scale='tai') assert t.scale == 'tai' t = Time(100.0, format='gps', scale='utc') assert t.scale == 'utc' # Check that bad scale is caught when format is specified with pytest.raises(ScaleValueError): Time(1950.0, format='byear', scale='bad scale') # Check that bad scale is caught when format is auto-determined with pytest.raises(ScaleValueError): Time('2000:001:00:00:00', scale='bad scale') def test_fits_scale(self): """Test that scale gets interpreted correctly for FITS strings.""" t = Time('2000-01-02(TAI)') assert t.scale == 'tai' # Test deprecated scale. t = Time('2000-01-02(IAT)') assert t.scale == 'tai' # Test with scale and FITS string scale t = Time('2045-11-08T00:00:00.000(UTC)', scale='utc') assert t.scale == 'utc' # Check that inconsistent scales lead to errors. with pytest.raises(ValueError): Time('2000-01-02(TAI)', scale='utc') with pytest.raises(ValueError): Time(['2000-01-02(TAI)', '2001-02-03(UTC)']) # Check that inconsistent FITS string scales lead to errors. with pytest.raises(ValueError): Time(['2000-01-02(TAI)', '2001-02-03(IAT)']) # Check that inconsistent realizations lead to errors. with pytest.raises(ValueError): Time(['2000-01-02(ET(NIST))', '2001-02-03(ET)']) def test_fits_scale_representation(self): t = Time('1960-01-02T03:04:05.678(ET(NIST))') assert t.scale == 'tt' assert t.value == '1960-01-02T03:04:05.678(ET(NIST))' def test_scale_default(self): """Test behavior when no scale is provided""" # These first three are TimeFromEpoch and have an intrinsic time scale t = Time(100.0, format='cxcsec') assert t.scale == 'tt' t = Time(100.0, format='unix') assert t.scale == 'utc' t = Time(100.0, format='gps') assert t.scale == 'tai' for date in ('J2000', '2000:001', '2000-01-01T00:00:00'): t = Time(date) assert t.scale == 'utc' t = Time(2000.1, format='byear') assert t.scale == 'utc' def test_epoch_times(self): """Test time formats derived from EpochFromTime""" t = Time(0.0, format='cxcsec', scale='tai') assert t.tt.iso == '1998-01-01 00:00:00.000' # Create new time object from this one and change scale, format t2 = Time(t, scale='tt', format='iso') assert t2.value == '1998-01-01 00:00:00.000' # Value take from Chandra.Time.DateTime('2010:001:00:00:00').secs t_cxcsec = 378691266.184 t = Time(t_cxcsec, format='cxcsec', scale='utc') assert allclose_sec(t.value, t_cxcsec) assert allclose_sec(t.cxcsec, t_cxcsec) assert allclose_sec(t.tt.value, t_cxcsec) assert allclose_sec(t.tt.cxcsec, t_cxcsec) assert t.yday == '2010:001:00:00:00.000' t = Time('2010:001:00:00:00.000', scale='utc') assert allclose_sec(t.cxcsec, t_cxcsec) assert allclose_sec(t.tt.cxcsec, t_cxcsec) # Value from: # d = datetime.datetime(2000, 1, 1) # matplotlib.pylab.dates.date2num(d) t = Time('2000-01-01 00:00:00', scale='utc') assert np.allclose(t.plot_date, 730120.0, atol=1e-5, rtol=0) # Round trip through epoch time for scale in ('utc', 'tt'): t = Time('2000:001', scale=scale) t2 = Time(t.unix, scale=scale, format='unix') assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000' # Test unix time. 
Values taken from http://en.wikipedia.org/wiki/Unix_time t = Time('2013-05-20 21:18:46', scale='utc') assert allclose_sec(t.unix, 1369084726.0) assert allclose_sec(t.tt.unix, 1369084726.0) # Values from issue #1118 t = Time('2004-09-16T23:59:59', scale='utc') assert allclose_sec(t.unix, 1095379199.0) class TestSofaErrors(): """Test that erfa status return values are handled correctly""" def test_bad_time(self): iy = np.array([2000], dtype=np.intc) im = np.array([2000], dtype=np.intc) # bad month id = np.array([2000], dtype=np.intc) # bad day with pytest.raises(ValueError): # bad month, fatal error djm0, djm = erfa.cal2jd(iy, im, id) iy[0] = -5000 im[0] = 2 with pytest.raises(ValueError): # bad year, fatal error djm0, djm = erfa.cal2jd(iy, im, id) iy[0] = 2000 with catch_warnings() as w: djm0, djm = erfa.cal2jd(iy, im, id) assert len(w) == 1 assert 'bad day (JD computed)' in str(w[0].message) assert allclose_jd(djm0, [2400000.5]) assert allclose_jd(djm, [53574.]) class TestCopyReplicate(): """Test issues related to copying and replicating data""" def test_immutable_input(self): """Internals are never mutable.""" jds = np.array([2450000.5], dtype=np.double) t = Time(jds, format='jd', scale='tai') assert allclose_jd(t.jd, jds) jds[0] = 2458654 assert not allclose_jd(t.jd, jds) mjds = np.array([50000.0], dtype=np.double) t = Time(mjds, format='mjd', scale='tai') assert allclose_jd(t.jd, [2450000.5]) mjds[0] = 0.0 assert allclose_jd(t.jd, [2450000.5]) def test_replicate(self): """Test replicate method""" t = Time(['2000:001'], format='yday', scale='tai', location=('45d', '45d')) t_yday = t.yday t_loc_x = t.location.x.copy() t2 = t.replicate() assert t.yday == t2.yday assert t.format == t2.format assert t.scale == t2.scale assert t.location == t2.location # This is not allowed publicly, but here we hack the internal time # and location values to show that t and t2 are sharing references. t2._time.jd1 += 100.0 # Need to delete the cached yday attributes (only an issue because # of the internal _time hack). del t.cache del t2.cache assert t.yday == t2.yday assert t.yday != t_yday # prove that it changed t2_loc_x_view = t2.location.x t2_loc_x_view[()] = 0 # use 0 to avoid having to give units assert t2.location.x == t2_loc_x_view assert t.location.x == t2.location.x assert t.location.x != t_loc_x # prove that it changed def test_copy(self): """Test copy method""" t = Time('2000:001', format='yday', scale='tai', location=('45d', '45d')) t_yday = t.yday t_loc_x = t.location.x.copy() t2 = t.copy() assert t.yday == t2.yday # This is not allowed publicly, but here we hack the internal time # and location values to show that t and t2 are not sharing references. t2._time.jd1 += 100.0 # Need to delete the cached yday attributes (only an issue because # of the internal _time hack). del t.cache del t2.cache assert t.yday != t2.yday assert t.yday == t_yday # prove that it did not change t2_loc_x_view = t2.location.x t2_loc_x_view[()] = 0 # use 0 to avoid having to give units assert t2.location.x == t2_loc_x_view assert t.location.x != t2.location.x assert t.location.x == t_loc_x # prove that it changed def test_python_builtin_copy(): t = Time('2000:001', format='yday', scale='tai') t2 = copy.copy(t) t3 = copy.deepcopy(t) assert t.jd == t2.jd assert t.jd == t3.jd def test_now(): """ Tests creating a Time object with the `now` class method. 
""" now = datetime.datetime.utcnow() t = Time.now() assert t.format == 'datetime' assert t.scale == 'utc' dt = t.datetime - now # a datetime.timedelta object # this gives a .1 second margin between the `utcnow` call and the `Time` # initializer, which is really way more generous than necessary - typical # times are more like microseconds. But it seems safer in case some # platforms have slow clock calls or something. assert dt.total_seconds() < 0.1 def test_decimalyear(): t = Time('2001:001', format='yday') assert t.decimalyear == 2001.0 t = Time(2000.0, [0.5, 0.75], format='decimalyear') assert np.all(t.value == [2000.5, 2000.75]) jd0 = Time('2000:001').jd jd1 = Time('2001:001').jd d_jd = jd1 - jd0 assert np.all(t.jd == [jd0 + 0.5 * d_jd, jd0 + 0.75 * d_jd]) def test_fits_year0(): t = Time(1721425.5, format='jd') assert t.fits == '0001-01-01T00:00:00.000(UTC)' t = Time(1721425.5 - 366., format='jd') assert t.fits == '+00000-01-01T00:00:00.000(UTC)' t = Time(1721425.5 - 366. - 365., format='jd') assert t.fits == '-00001-01-01T00:00:00.000(UTC)' def test_fits_year10000(): t = Time(5373484.5, format='jd', scale='tai') assert t.fits == '+10000-01-01T00:00:00.000(TAI)' t = Time(5373484.5 - 365., format='jd', scale='tai') assert t.fits == '9999-01-01T00:00:00.000(TAI)' t = Time(5373484.5, -1./24./3600., format='jd', scale='tai') assert t.fits == '9999-12-31T23:59:59.000(TAI)' def test_dir(): t = Time('2000:001', format='yday', scale='tai') assert 'utc' in dir(t) def test_bool(): """Any Time object should evaluate to True unless it is empty [#3520].""" t = Time(np.arange(50000, 50010), format='mjd', scale='utc') assert bool(t) is True assert bool(t[0]) is True assert bool(t[:0]) is False def test_len_size(): """Check length of Time objects and that scalar ones do not have one.""" t = Time(np.arange(50000, 50010), format='mjd', scale='utc') assert len(t) == 10 and t.size == 10 t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc') assert len(t1) == 2 and t1.size == 10 # Can have length 1 or length 0 arrays. t2 = t[:1] assert len(t2) == 1 and t2.size == 1 t3 = t[:0] assert len(t3) == 0 and t3.size == 0 # But cannot get length from scalar. t4 = t[0] with pytest.raises(TypeError) as err: len(t4) # Ensure we're not just getting the old error of # "object of type 'float' has no len()". assert 'Time' in str(err) def test_TimeFormat_scale(): """guard against recurrence of #1122, where TimeFormat class looses uses attributes (delta_ut1_utc here), preventing conversion to unix, cxc""" t = Time('1900-01-01', scale='ut1') t.delta_ut1_utc = 0.0 t.unix assert t.unix == t.utc.unix @pytest.mark.remote_data def test_scale_conversion(): Time(Time.now().cxcsec, format='cxcsec', scale='ut1') def test_byteorder(): """Ensure that bigendian and little-endian both work (closes #2942)""" mjd = np.array([53000.00, 54000.00]) big_endian = mjd.astype('>f8') little_endian = mjd.astype('<f8') time_mjd = Time(mjd, format='mjd') time_big = Time(big_endian, format='mjd') time_little = Time(little_endian, format='mjd') assert np.all(time_big == time_mjd) assert np.all(time_little == time_mjd) def test_datetime_tzinfo(): """ Test #3160 that time zone info in datetime objects is respected. 
""" class TZm6(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(hours=-6) d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6()) t = Time(d) assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4) def test_subfmts_regex(): """ Test having a custom subfmts with a regular expression """ class TimeLongYear(TimeString): name = 'longyear' subfmts = (('date', r'(?P<year>[+-]\d{5})-%m-%d', # hybrid '{year:+06d}-{mon:02d}-{day:02d}'),) t = Time('+02000-02-03', format='longyear') assert t.value == '+02000-02-03' assert t.jd == Time('2000-02-03').jd def test_set_format_basic(): """ Test basics of setting format attribute. """ for format, value in (('jd', 2451577.5), ('mjd', 51577.0), ('cxcsec', 65923264.184), # confirmed with Chandra.Time ('datetime', datetime.datetime(2000, 2, 3, 0, 0)), ('iso', '2000-02-03 00:00:00.000')): t = Time('+02000-02-03', format='fits') t0 = t.replicate() t.format = format assert t.value == value # Internal jd1 and jd2 are preserved assert t._time.jd1 is t0._time.jd1 assert t._time.jd2 is t0._time.jd2 def test_set_format_shares_subfmt(): """ Set format and round trip through a format that shares out_subfmt """ t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5) tc = t.copy() t.format = 'isot' assert t.precision == 5 assert t.out_subfmt == 'date_hms' assert t.value == '2000-02-03T00:00:00.00000' t.format = 'fits' assert t.value == tc.value assert t.precision == 5 def test_set_format_does_not_share_subfmt(): """ Set format and round trip through a format that does not share out_subfmt """ t = Time('+02000-02-03', format='fits', out_subfmt='longdate') t.format = 'isot' assert t.out_subfmt == '*' # longdate_hms not there, goes to default assert t.value == '2000-02-03T00:00:00.000' t.format = 'fits' assert t.out_subfmt == '*' assert t.value == '2000-02-03T00:00:00.000(UTC)' # date_hms def test_replicate_value_error(): """ Passing a bad format to replicate should raise ValueError, not KeyError. PR #3857. """ t1 = Time('2007:001', scale='tai') with pytest.raises(ValueError) as err: t1.replicate(format='definitely_not_a_valid_format') assert 'format must be one of' in str(err) def test_remove_astropy_time(): """ Make sure that 'astropy_time' format is really gone after #3857. Kind of silly test but just to be sure. """ t1 = Time('2007:001', scale='tai') assert 'astropy_time' not in t1.FORMATS with pytest.raises(ValueError) as err: Time(t1, format='astropy_time') assert 'format must be one of' in str(err) def test_isiterable(): """ Ensure that scalar `Time` instances are not reported as iterable by the `isiterable` utility. 
Regression test for https://github.com/astropy/astropy/issues/4048 """ t1 = Time.now() assert not isiterable(t1) t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'], format='iso', scale='utc') assert isiterable(t2) def test_to_datetime(): tz = TimezoneInfo(utc_offset=-10*u.hour, tzname='US/Hawaii') # The above lines produces a `datetime.tzinfo` object similar to: # tzinfo = pytz.timezone('US/Hawaii') time = Time('2010-09-03 00:00:00') tz_aware_datetime = time.to_datetime(tz) assert tz_aware_datetime.time() == datetime.time(14, 0) forced_to_astropy_time = Time(tz_aware_datetime) assert tz.tzname(time.datetime) == tz_aware_datetime.tzname() assert time == forced_to_astropy_time # Test non-scalar time inputs: time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00', '1990-09-03 06:00:00']) tz_aware_datetime = time.to_datetime(tz) forced_to_astropy_time = Time(tz_aware_datetime) for dt, tz_dt in zip(time.datetime, tz_aware_datetime): assert tz.tzname(dt) == tz_dt.tzname() assert np.all(time == forced_to_astropy_time) with pytest.raises(ValueError) as e: Time('2015-06-30 23:59:60.000').to_datetime() assert 'does not support leap seconds' in str(e.message) @pytest.mark.skipif('not HAS_PYTZ') def test_to_datetime_pytz(): tz = pytz.timezone('US/Hawaii') time = Time('2010-09-03 00:00:00') tz_aware_datetime = time.to_datetime(tz) forced_to_astropy_time = Time(tz_aware_datetime) assert tz_aware_datetime.time() == datetime.time(14, 0) assert tz.tzname(time.datetime) == tz_aware_datetime.tzname() assert time == forced_to_astropy_time # Test non-scalar time inputs: time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00', '1990-09-03 06:00:00']) tz_aware_datetime = time.to_datetime(tz) forced_to_astropy_time = Time(tz_aware_datetime) for dt, tz_dt in zip(time.datetime, tz_aware_datetime): assert tz.tzname(dt) == tz_dt.tzname() assert np.all(time == forced_to_astropy_time) def test_cache(): t = Time('2010-09-03 00:00:00') t2 = Time('2010-09-03 00:00:00') # Time starts out without a cache assert 'cache' not in t._time.__dict__ # Access the iso format and confirm that the cached version is as expected t.iso assert t.cache['format']['iso'] == t2.iso # Access the TAI scale and confirm that the cached version is as expected t.tai assert t.cache['scale']['tai'] == t2.tai # New Time object after scale transform does not have a cache yet assert 'cache' not in t.tt._time.__dict__ # Clear the cache del t.cache assert 'cache' not in t._time.__dict__ # Check accessing the cache creates an empty dictionary assert not t.cache assert 'cache' in t._time.__dict__ def test_epoch_date_jd_is_day_fraction(): """ Ensure that jd1 and jd2 of an epoch Time are respect the (day, fraction) convention (see #6638) """ t0 = Time("J2000", scale="tdb") assert t0.jd1 == 2451545.0 assert t0.jd2 == 0.0 t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb") assert t1.jd1 == 2451545.0 assert t1.jd2 == 0.0 def test_sum_is_equivalent(): """ Ensure that two equal dates defined in different ways behave equally (#6638) """ t0 = Time("J2000", scale="tdb") t1 = Time("2000-01-01 12:00:00", scale="tdb") assert t0 == t1 assert (t0 + 1 * u.second) == (t1 + 1 * u.second) def test_string_valued_columns(): # Columns have a nice shim that translates bytes to string as needed. # Ensure Time can handle these. Use multi-d array just to be sure. 
times = [[['{:04d}-{:02d}-{:02d}'.format(y, m, d) for d in range(1, 3)] for m in range(5, 7)] for y in range(2012, 2014)] cutf32 = Column(times) cbytes = cutf32.astype('S') tutf32 = Time(cutf32) tbytes = Time(cbytes) assert np.all(tutf32 == tbytes) tutf32 = Time(Column(['B1950'])) tbytes = Time(Column([b'B1950'])) assert tutf32 == tbytes # Regression tests for arrays with entries with unequal length. gh-6903. times = Column([b'2012-01-01', b'2012-01-01T00:00:00']) assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00'])) def test_bytes_input(): tstring = '2011-01-02T03:04:05' tbytes = b'2011-01-02T03:04:05' assert tbytes.decode('ascii') == tstring t0 = Time(tstring) t1 = Time(tbytes) assert t1 == t0 tarray = np.array(tbytes) assert tarray.dtype.kind == 'S' t2 = Time(tarray) assert t2 == t0 def test_writeable_flag(): t = Time([1, 2, 3], format='cxcsec') t[1] = 5.0 assert allclose_sec(t[1].value, 5.0) t.writeable = False with pytest.raises(ValueError) as err: t[1] = 5.0 assert 'Time object is read-only. Make a copy()' in str(err) with pytest.raises(ValueError) as err: t[:] = 5.0 assert 'Time object is read-only. Make a copy()' in str(err) t.writeable = True t[1] = 10.0 assert allclose_sec(t[1].value, 10.0) # Scalar is not writeable t = Time('2000:001', scale='utc') with pytest.raises(ValueError) as err: t[()] = '2000:002' assert 'scalar Time object is read-only.' in str(err) # Transformed attribute is not writeable t = Time(['2000:001', '2000:002'], scale='utc') t2 = t.tt # t2 is read-only now because t.tt is cached with pytest.raises(ValueError) as err: t2[0] = '2005:001' assert 'Time object is read-only. Make a copy()' in str(err) def test_setitem_location(): loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m) t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc) # Succeeds because the right hand side makes no implication about # location and just inherits t.location t[0, 0] = 0 assert allclose_sec(t.value, [[0, 2], [3, 4]]) # Fails because the right hand side has location=None with pytest.raises(ValueError) as err: t[0, 0] = Time(-1, format='cxcsec') assert ('cannot set to Time with different location: ' 'expected location=(1.0, 3.0, 5.0) m and ' 'got location=None') in str(err) # Succeeds because the right hand side correctly sets location t[0, 0] = Time(-2, format='cxcsec', location=loc[0]) assert allclose_sec(t.value, [[-2, 2], [3, 4]]) # Fails because the right hand side has different location with pytest.raises(ValueError) as err: t[0, 0] = Time(-2, format='cxcsec', location=loc[1]) assert ('cannot set to Time with different location: ' 'expected location=(1.0, 3.0, 5.0) m and ' 'got location=(2.0, 4.0, 6.0) m') in str(err) # Fails because the Time has None location and RHS has defined location t = Time([[1, 2], [3, 4]], format='cxcsec') with pytest.raises(ValueError) as err: t[0, 0] = Time(-2, format='cxcsec', location=loc[1]) assert ('cannot set to Time with different location: ' 'expected location=None and ' 'got location=(2.0, 4.0, 6.0) m') in str(err) # Broadcasting works t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc) t[0, :] = Time([-3, -4], format='cxcsec', location=loc) assert allclose_sec(t.value, [[-3, -4], [3, 4]]) def test_setitem_from_python_objects(): t = Time([[1, 2], [3, 4]], format='cxcsec') assert t.cache == {} t.iso assert 'iso' in t.cache['format'] assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'], ['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']]) # Setting item clears cache t[0, 1] 
= 100 assert t.cache == {} assert allclose_sec(t.value, [[1, 100], [3, 4]]) assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'], ['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']]) # Set with a float value t.iso t[1, :] = 200 assert t.cache == {} assert allclose_sec(t.value, [[1, 100], [200, 200]]) # Array of strings in yday format t[:, 1] = ['1998:002', '1998:003'] assert allclose_sec(t.value, [[1, 86400 * 1], [200, 86400 * 2]]) # Incompatible numeric value t = Time(['2000:001', '2000:002']) t[0] = '2001:001' with pytest.raises(ValueError) as err: t[0] = 100 assert 'cannot convert value to a compatible Time object' in str(err) def test_setitem_from_time_objects(): """Set from existing Time object. """ # Set from time object with different scale t = Time(['2000:001', '2000:002'], scale='utc') t2 = Time(['2000:010'], scale='tai') t[1] = t2[0] assert t.value[1] == t2.utc.value[0] # Time object with different scale and format t = Time(['2000:001', '2000:002'], scale='utc') t2.format = 'jyear' t[1] = t2[0] assert t.yday[1] == t2.utc.yday[0] def test_setitem_bad_item(): t = Time([1, 2], format='cxcsec') with pytest.raises(IndexError): t['asdf'] = 3 def test_setitem_deltas(): """Setting invalidates any transform deltas""" t = Time([1, 2], format='cxcsec') t.delta_tdb_tt = [1, 2] t.delta_ut1_utc = [3, 4] t[1] = 3 assert not hasattr(t, '_delta_tdb_tt') assert not hasattr(t, '_delta_ut1_utc')
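# --- Illustrative sketch (not part of the original test suite): the caching
# behaviour that several of the tests above work around.  Format and scale
# conversions on a Time instance are cached on first access, which is why
# tests that poke at internals call ``del t.cache`` afterwards; ordinary code
# never needs to touch the cache.  The helper name below is purely for
# illustration and is not collected by pytest.
def _cache_behaviour_sketch():
    from astropy.time import Time

    t = Time('2010-09-03 00:00:00')
    t.iso                              # first access computes and caches the ISO string
    assert 'iso' in t.cache['format']  # cached per output format
    del t.cache                        # drop every cached representation
    assert not t.cache                 # lazily recreated as an empty dict on access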
83c1f0e8e4e7f191fb861aee82a2fb8d453ac8dc1c41a58faef04b67c466a257
import functools import pytest import numpy as np from .. import Time, TimeDelta allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0) allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52) # 20 ps atol allclose_sec = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52 * 24 * 3600) # 20 ps atol dt_tiny = TimeDelta(2. ** -52, format='jd') def test_addition(): """Check that an addition at the limit of precision (2^-52) is seen""" t = Time(2455555., 0.5, format='jd', scale='utc') t_dt = t + dt_tiny assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2 # Check that the addition is exactly reversed by the corresponding subtraction t2 = t_dt - dt_tiny assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2 def test_mult_div(): """Test precision with multiply and divide""" dt_small = 6 * dt_tiny # pick a number that will leave remainder if divided by 6. dt_big = TimeDelta(20000., format='jd') dt_big_small_by_6 = (dt_big + dt_small) / 6. dt_frac = dt_big_small_by_6 - TimeDelta(3333., format='jd') assert allclose_jd2(dt_frac.jd2, 0.33333333333333354) def test_init_variations(): """Check that 3 ways of specifying a time + small offset are equivalent""" dt_tiny_sec = dt_tiny.jd2 * 86400. t1 = Time(1e11, format='cxcsec') + dt_tiny t2 = Time(1e11, dt_tiny_sec, format='cxcsec') t3 = Time(dt_tiny_sec, 1e11, format='cxcsec') assert t1.jd1 == t2.jd1 assert t1.jd2 == t2.jd2 assert t1.jd1 == t3.jd1 assert t1.jd2 == t3.jd2 def test_precision_exceeds_64bit(): """ Check that Time object really holds more precision than float64 by looking at the (naively) summed 64-bit result and asserting equality at the bit level. """ t1 = Time(1.23456789e11, format='cxcsec') t2 = t1 + dt_tiny assert t1.jd == t2.jd def test_through_scale_change(): """Check that precision holds through scale change (cxcsec is TT)""" t0 = Time(1.0, format='cxcsec') t1 = Time(1.23456789e11, format='cxcsec') dt_tt = t1 - t0 dt_tai = t1.tai - t0.tai assert allclose_jd(dt_tt.jd1, dt_tai.jd1) assert allclose_jd2(dt_tt.jd2, dt_tai.jd2) def test_iso_init(): """Check when initializing from ISO date""" t1 = Time('2000:001:00:00:00.00000001', scale='tai') t2 = Time('3000:001:13:00:00.00000002', scale='tai') dt = t2 - t1 assert allclose_jd2(dt.jd2, 13. / 24. + 1e-8 / 86400. - 1.0) def test_jd1_is_mult_of_half_or_one(): """ Check that jd1 is a multiple of 0.5 (note the difference from when Time is created with a format like 'jd' or 'cxcsec', where jd1 is a multiple of 1.0). """ t1 = Time('2000:001:00:00:00.00000001', scale='tai') assert np.round(t1.jd1 * 2) == t1.jd1 * 2 t1 = Time(1.23456789, 12345678.90123456, format='jd', scale='tai') assert np.round(t1.jd1) == t1.jd1 @pytest.mark.xfail def test_precision_neg(): """ Check precision when jd1 is negative. Currently fails because ERFA routines use a test like jd1 > jd2 to decide which component to update. Should be abs(jd1) > abs(jd2).
""" t1 = Time(-100000.123456, format='jd', scale='tt') assert np.round(t1.jd1) == t1.jd1 t1_tai = t1.tai assert np.round(t1_tai.jd1) == t1_tai.jd1 def test_precision_epoch(): """ Check that input via epoch also has full precision, i.e., against regression on https://github.com/astropy/astropy/pull/366 """ t_utc = Time(range(1980, 2001), format='jyear', scale='utc') t_tai = Time(range(1980, 2001), format='jyear', scale='tai') dt = t_utc - t_tai assert allclose_sec(dt.sec, np.round(dt.sec)) def test_leap_seconds_rounded_correctly(): """Regression tests against #2083, where a leap second was rounded incorrectly by the underlying ERFA routine.""" t = Time(['2012-06-30 23:59:59.413', '2012-07-01 00:00:00.413'], scale='ut1', precision=3).utc assert np.all(t.iso == np.array(['2012-06-30 23:59:60.000', '2012-07-01 00:00:00.000'])) # with the bug, both yielded '2012-06-30 23:59:60.000'
92e18ad8ec8dd88bc3005dd020d38258235d99ec87d4bd6aa79ba4bab7a2adff
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from ... import units as u from ...coordinates import EarthLocation, SkyCoord, solar_system_ephemeris from .. import Time, TimeDelta try: import jplephem # pylint: disable=W0611 except ImportError: HAS_JPLEPHEM = False else: HAS_JPLEPHEM = True class TestHelioBaryCentric(): """ Verify time offsets to the solar system barycentre and the heliocentre. Uses the WHT observing site. Tests are against values returned at time of initial creation of these routines. They agree to an independent SLALIB based implementation to 20 microseconds. """ def setup(self): wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m) self.obstime = Time("2013-02-02T23:00", location=wht) self.obstime2 = Time("2013-08-02T23:00", location=wht) self.obstimeArr = Time(["2013-02-02T23:00", "2013-08-02T23:00"], location=wht) self.star = SkyCoord("08:08:08 +32:00:00", unit=(u.hour, u.degree), frame='icrs') def test_heliocentric(self): hval = self.obstime.light_travel_time(self.star, 'heliocentric') assert isinstance(hval, TimeDelta) assert hval.scale == 'tdb' assert abs(hval - 461.43037870502235 * u.s) < 1. * u.us def test_barycentric(self): bval = self.obstime.light_travel_time(self.star, 'barycentric') assert isinstance(bval, TimeDelta) assert bval.scale == 'tdb' assert abs(bval - 460.58538779827836 * u.s) < 1. * u.us def test_arrays(self): bval1 = self.obstime.light_travel_time(self.star, 'barycentric') bval2 = self.obstime2.light_travel_time(self.star, 'barycentric') bval_arr = self.obstimeArr.light_travel_time(self.star, 'barycentric') hval1 = self.obstime.light_travel_time(self.star, 'heliocentric') hval2 = self.obstime2.light_travel_time(self.star, 'heliocentric') hval_arr = self.obstimeArr.light_travel_time(self.star, 'heliocentric') assert hval_arr[0]-hval1 < 1. * u.us assert hval_arr[1]-hval2 < 1. * u.us assert bval_arr[0]-bval1 < 1. * u.us assert bval_arr[1]-bval2 < 1. * u.us @pytest.mark.remote_data @pytest.mark.skipif('not HAS_JPLEPHEM') def test_ephemerides(self): bval1 = self.obstime.light_travel_time(self.star, 'barycentric') with solar_system_ephemeris.set('jpl'): bval2 = self.obstime.light_travel_time(self.star, 'barycentric', ephemeris='jpl') # should differ by less than 0.1 ms, but not be the same assert abs(bval1 - bval2) < 1. * u.ms assert abs(bval1 - bval2) > 1. * u.us
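# --- Illustrative sketch (not part of the original test suite): the typical
# use of the correction exercised above, turning observed (topocentric) times
# into barycentric times for a target.  Site, target and date are arbitrary
# example values, and the helper name is purely for illustration.
def _light_travel_time_sketch():
    import astropy.units as u
    from astropy.coordinates import EarthLocation, SkyCoord
    from astropy.time import Time

    site = EarthLocation(342.12 * u.deg, 28.76 * u.deg, 2327 * u.m)
    target = SkyCoord("08:08:08 +32:00:00", unit=(u.hour, u.degree), frame='icrs')
    t = Time("2013-02-02T23:00", location=site)
    ltt = t.light_travel_time(target, 'barycentric')  # a TimeDelta on the tdb scale
    t_bary = t.tdb + ltt                               # barycentric arrival time
    return t_bary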
73cacdb1f7dce0b9e3cb1d2015f7f59ac56712d81e2fc24761751e315df11e5b
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pickle import numpy as np from .. import Time class TestPickle(): """Basic pickle test of time""" def test_pickle(self): times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'] t1 = Time(times, scale='utc') for prot in range(pickle.HIGHEST_PROTOCOL + 1): # also exercise the highest protocol t1d = pickle.dumps(t1, prot) t1l = pickle.loads(t1d) assert np.all(t1l == t1) t2 = Time('2012-06-30 12:00:00', scale='utc') for prot in range(pickle.HIGHEST_PROTOCOL + 1): t2d = pickle.dumps(t2, prot) t2l = pickle.loads(t2d) assert t2l == t2
a8062fb85c8b4cffa9a4fd085264ea8208bad4e8ee9411db3601b4016ad39a96
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import pytest import numpy as np from .. import Time from ...utils.iers import iers # used in testing allclose_jd = functools.partial(np.allclose, rtol=0, atol=1e-9) allclose_sec = functools.partial(np.allclose, rtol=1e-15, atol=1e-4) # 0.1 ms atol; IERS-B files change at that level. try: iers.IERS_A.open() # check if IERS_A is available except OSError: HAS_IERS_A = False else: HAS_IERS_A = True class TestTimeUT1(): """Test Time.ut1 using IERS tables""" @pytest.mark.remote_data def test_utc_to_ut1(self): "Test conversion of UTC to UT1, making sure to include a leap second""" t = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59', '2012-06-30 23:59:60', '2012-07-01 00:00:00', '2012-07-01 12:00:00'], scale='utc') t_ut1_jd = t.ut1.jd t_comp = np.array([2456108.9999932079, 2456109.4999816339, 2456109.4999932083, 2456109.5000047823, 2456110.0000047833]) assert allclose_jd(t_ut1_jd, t_comp) t_back = t.ut1.utc assert allclose_jd(t.jd, t_back.jd) tnow = Time.now() tnow.ut1 def test_ut1_to_utc(self): """Also test the reverse, around the leap second (round-trip test closes #2077)""" t = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59', '2012-07-01 00:00:00', '2012-07-01 00:00:01', '2012-07-01 12:00:00'], scale='ut1') t_utc_jd = t.utc.jd t_comp = np.array([2456109.0000010049, 2456109.4999836441, 2456109.4999952177, 2456109.5000067917, 2456109.9999952167]) assert allclose_jd(t_utc_jd, t_comp) t_back = t.utc.ut1 assert allclose_jd(t.jd, t_back.jd) def test_delta_ut1_utc(self): """Accessing delta_ut1_utc should try to get it from IERS (closes #1924 partially)""" t = Time('2012-06-30 12:00:00', scale='utc') assert not hasattr(t, '_delta_ut1_utc') # accessing delta_ut1_utc calculates it assert allclose_sec(t.delta_ut1_utc, -0.58682110003124965) # and keeps it around assert allclose_sec(t._delta_ut1_utc, -0.58682110003124965) @pytest.mark.skipif('not HAS_IERS_A') class TestTimeUT1_IERSA(): def test_ut1_iers_A(self): tnow = Time.now() iers_a = iers.IERS_A.open() tnow.delta_ut1_utc, status = iers_a.ut1_utc(tnow, return_status=True) assert status == iers.FROM_IERS_A_PREDICTION tnow_ut1_jd = tnow.ut1.jd assert tnow_ut1_jd != tnow.jd @pytest.mark.remote_data class TestTimeUT1_IERS_Auto(): def test_ut1_iers_auto(self): tnow = Time.now() iers_a = iers.IERS_Auto.open() tnow.delta_ut1_utc, status = iers_a.ut1_utc(tnow, return_status=True) assert status == iers.FROM_IERS_A_PREDICTION tnow_ut1_jd = tnow.ut1.jd assert tnow_ut1_jd != tnow.jd
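# --- Illustrative sketch (not part of the original test suite): UT1
# conversion without network access.  The tests above obtain UT1-UTC from
# IERS tables; offline, the offset can be supplied by hand, as
# test_delta_ut1_utc does implicitly (the value below is the one it checks).
# The helper name is purely for illustration.
def _manual_delta_ut1_utc_sketch():
    from astropy.time import Time

    t = Time('2012-06-30 12:00:00', scale='utc')
    t.delta_ut1_utc = -0.58682110003124965  # seconds, normally looked up in IERS
    return t.ut1.iso                         # UT1 is then available with no download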
58541290d2cd443d982f07b04b7e21c3761753102382be24b30f7a203119cdb9
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from .. import Time class TestGuess(): """Test guessing the input value format""" def test_guess1(self): times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'] t = Time(times, scale='utc') assert (repr(t) == "<Time object: scale='utc' format='iso' " "value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>") def test_guess2(self): times = ['1999-01-01 00:00:00.123456789', '2010-01 00:00:00'] with pytest.raises(ValueError): Time(times, scale='utc') def test_guess3(self): times = ['1999:001:00:00:00.123456789', '2010:001'] t = Time(times, scale='utc') assert (repr(t) == "<Time object: scale='utc' format='yday' " "value=['1999:001:00:00:00.123' '2010:001:00:00:00.000']>") def test_guess4(self): times = [10, 20] with pytest.raises(ValueError): Time(times, scale='utc')
b3f3868d31cc73ff7f43663521cd63615d6167f2cc5a2b765131a21a930ac4b3
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import pytest import numpy as np from .. import Time, TimeDelta, OperandTypeError from ... import units as u from ...table import Column allclose_sec = functools.partial(np.allclose, rtol=2. ** -52, atol=2. ** -52 * 24 * 3600) # 20 ps atol class TestTimeQuantity(): """Test Interaction of Time with Quantities""" def test_valid_quantity_input(self): """Test Time formats that are allowed to take quantity input.""" q = 2450000.125*u.day t1 = Time(q, format='jd', scale='utc') assert t1.value == q.value q2 = q.to(u.second) t2 = Time(q2, format='jd', scale='utc') assert t2.value == q.value == q2.to_value(u.day) q3 = q-2400000.5*u.day t3 = Time(q3, format='mjd', scale='utc') assert t3.value == q3.value # test we can deal with two quantity arguments, with different units qs = 24.*36.*u.second t4 = Time(q3, qs, format='mjd', scale='utc') assert t4.value == (q3+qs).to_value(u.day) qy = 1990.*u.yr ty1 = Time(qy, format='jyear', scale='utc') assert ty1.value == qy.value ty2 = Time(qy.to(u.day), format='jyear', scale='utc') assert ty2.value == qy.value qy2 = 10.*u.yr tcxc = Time(qy2, format='cxcsec') assert tcxc.value == qy2.to_value(u.second) tgps = Time(qy2, format='gps') assert tgps.value == qy2.to_value(u.second) tunix = Time(qy2, format='unix') assert tunix.value == qy2.to_value(u.second) qd = 2000.*365.*u.day tplt = Time(qd, format='plot_date', scale='utc') assert tplt.value == qd.value def test_invalid_quantity_input(self): with pytest.raises(u.UnitsError): Time(2450000.*u.m, format='jd', scale='utc') with pytest.raises(u.UnitsError): Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc') def test_column_with_and_without_units(self): """Ensure a Column without a unit is treated as an array [#3648]""" a = np.arange(50000., 50010.) 
ta = Time(a, format='mjd') c1 = Column(np.arange(50000., 50010.), name='mjd') tc1 = Time(c1, format='mjd') assert np.all(ta == tc1) c2 = Column(np.arange(50000., 50010.), name='mjd', unit='day') tc2 = Time(c2, format='mjd') assert np.all(ta == tc2) c3 = Column(np.arange(50000., 50010.), name='mjd', unit='m') with pytest.raises(u.UnitsError): Time(c3, format='mjd') def test_no_quantity_input_allowed(self): """Time formats that are not allowed to take Quantity input.""" qy = 1990.*u.yr for fmt in ('iso', 'yday', 'datetime', 'byear', 'byear_str', 'jyear_str'): with pytest.raises(ValueError): Time(qy, format=fmt, scale='utc') def test_valid_quantity_operations(self): """Check that adding a time-valued quantity to a Time gives a Time""" t0 = Time(100000., format='cxcsec') q1 = 10.*u.second t1 = t0 + q1 assert isinstance(t1, Time) assert t1.value == t0.value+q1.to_value(u.second) q2 = 1.*u.day t2 = t0 - q2 assert allclose_sec(t2.value, t0.value-q2.to_value(u.second)) # check broadcasting q3 = np.arange(15.).reshape(3, 5) * u.hour t3 = t0 - q3 assert t3.shape == q3.shape assert allclose_sec(t3.value, t0.value-q3.to_value(u.second)) def test_invalid_quantity_operations(self): """Check that comparisons of Time with quantities do not work (even for time-like, since we cannot compare Time to TimeDelta)""" with pytest.raises(OperandTypeError): Time(100000., format='cxcsec') > 10.*u.m with pytest.raises(OperandTypeError): Time(100000., format='cxcsec') > 10.*u.second class TestTimeDeltaQuantity(): """Test interaction of TimeDelta with Quantities""" def test_valid_quantity_input(self): """Test that TimeDelta can take quantity input.""" q = 500.25*u.day dt1 = TimeDelta(q, format='jd') assert dt1.value == q.value dt2 = TimeDelta(q, format='sec') assert dt2.value == q.to_value(u.second) dt3 = TimeDelta(q) assert dt3.value == q.value def test_invalid_quantity_input(self): with pytest.raises(u.UnitsError): TimeDelta(2450000.*u.m, format='jd') with pytest.raises(u.UnitsError): Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc') with pytest.raises(OperandTypeError): TimeDelta(100, format='sec') > 10.*u.m def test_quantity_output(self): q = 500.25*u.day dt = TimeDelta(q) assert dt.to(u.day) == q assert dt.to(u.second).value == q.to_value(u.second) with pytest.raises(u.UnitsError): dt.to(u.m) def test_valid_quantity_operations1(self): """Check adding/subtracting/comparing a time-valued quantity works with a TimeDelta.
Addition/subtraction should give TimeDelta""" t0 = TimeDelta(106400., format='sec') q1 = 10.*u.second t1 = t0 + q1 assert isinstance(t1, TimeDelta) assert t1.value == t0.value+q1.to_value(u.second) q2 = 1.*u.day t2 = t0 - q2 assert allclose_sec(t2.value, t0.value-q2.to_value(u.second)) # now comparisons assert t0 > q1 assert t0 < 1.*u.yr # and broadcasting q3 = np.arange(12.).reshape(4, 3) * u.hour t3 = t0 + q3 assert t3.shape == q3.shape assert allclose_sec(t3.value, t0.value + q3.to_value(u.second)) def test_valid_quantity_operations2(self): """Check that TimeDelta is treated as a quantity where possible.""" t0 = TimeDelta(100000., format='sec') f = 1./t0 assert isinstance(f, u.Quantity) assert f.unit == 1./u.day g = 10.*u.m/u.second**2 v = t0 * g assert isinstance(v, u.Quantity) assert v.decompose().unit == u.m / u.second q = np.log10(t0/u.second) assert isinstance(q, u.Quantity) assert q.value == np.log10(t0.sec) s = 1.*u.m v = s/t0 assert isinstance(v, u.Quantity) assert v.decompose().unit == u.m / u.second # broadcasting t1 = TimeDelta(np.arange(100000., 100012.).reshape(6, 2), format='sec') f = np.array([1., 2.]) * u.cycle * u.Hz phase = f * t1 assert isinstance(phase, u.Quantity) assert phase.shape == t1.shape assert phase.unit.is_equivalent(u.cycle) def test_invalid_quantity_operations(self): """Check comparisons of TimeDelta with non-time quantities fails.""" with pytest.raises(OperandTypeError): TimeDelta(100000., format='sec') > 10.*u.m def test_invalid_quantity_broadcast(self): """Check broadcasting rules in interactions with Quantity.""" t0 = TimeDelta(np.arange(12.).reshape(4, 3), format='sec') with pytest.raises(ValueError): t0 + np.arange(4.) * u.s class TestDeltaAttributes(): def test_delta_ut1_utc(self): t = Time('2010-01-01 00:00:00', format='iso', scale='utc', precision=6) t.delta_ut1_utc = 0.3 * u.s assert t.ut1.iso == '2010-01-01 00:00:00.300000' t.delta_ut1_utc = 0.4 / 60. * u.minute assert t.ut1.iso == '2010-01-01 00:00:00.400000' with pytest.raises(u.UnitsError): t.delta_ut1_utc = 0.4 * u.m # Also check that a TimeDelta works. t.delta_ut1_utc = TimeDelta(0.3, format='sec') assert t.ut1.iso == '2010-01-01 00:00:00.300000' t.delta_ut1_utc = TimeDelta(0.5/24./3600., format='jd') assert t.ut1.iso == '2010-01-01 00:00:00.500000' def test_delta_tdb_tt(self): t = Time('2010-01-01 00:00:00', format='iso', scale='tt', precision=6) t.delta_tdb_tt = 20. * u.second assert t.tdb.iso == '2010-01-01 00:00:20.000000' t.delta_tdb_tt = 30. / 60. * u.minute assert t.tdb.iso == '2010-01-01 00:00:30.000000' with pytest.raises(u.UnitsError): t.delta_tdb_tt = 0.4 * u.m # Also check that a TimeDelta works. t.delta_tdb_tt = TimeDelta(40., format='sec') assert t.tdb.iso == '2010-01-01 00:00:40.000000' t.delta_tdb_tt = TimeDelta(50./24./3600., format='jd') assert t.tdb.iso == '2010-01-01 00:00:50.000000'
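# --- Illustrative sketch (not part of the original test suite): the two
# directions of TimeDelta/Quantity interoperation shown above, in the form
# user code would typically use them.  The helper name is purely for
# illustration.
def _timedelta_quantity_sketch():
    import astropy.units as u
    from astropy.time import TimeDelta

    dt = TimeDelta(500.25 * u.day)   # construct from a time-valued Quantity
    secs = dt.to(u.second)           # convert back: 43221600.0 s
    speed = (1. * u.km) / dt         # TimeDelta acts like a Quantity here
    return secs, speed.to(u.m / u.s)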
7178fb72abc0bf29e997f35992e59e859cc60a27b6cb6f6fe8f5ad0d877ef903
# Licensed under a 3-clause BSD style license - see LICENSE.rst import operator import pytest import numpy as np from .. import Time, TimeDelta, OperandTypeError class TestTimeComparisons(): """Test Comparisons of Time and TimeDelta classes""" def setup(self): self.t1 = Time(np.arange(49995, 50005), format='mjd', scale='utc') self.t2 = Time(np.arange(49000, 51000, 200), format='mjd', scale='utc') def test_miscompares(self): """ If an incompatible object is compared to a Time object, == should return False and != should return True. All other comparison operators should raise an OperandTypeError. """ t1 = Time('J2000', scale='utc') for op, op_str in ((operator.ge, '>='), (operator.gt, '>'), (operator.le, '<='), (operator.lt, '<')): with pytest.raises(OperandTypeError) as err: op(t1, None) assert str(err).endswith("Unsupported operand type(s) for {0}: 'Time' and 'NoneType'" .format(op_str)) # Keep == and != as they are specifically meant to test Time.__eq__ # and Time.__ne__ assert (t1 == None) is False # nopep8 assert (t1 != None) is True # nopep8 def test_time(self): t1_lt_t2 = self.t1 < self.t2 assert np.all(t1_lt_t2 == np.array([False, False, False, False, False, False, True, True, True, True])) t1_ge_t2 = self.t1 >= self.t2 assert np.all(t1_ge_t2 != t1_lt_t2) t1_le_t2 = self.t1 <= self.t2 assert np.all(t1_le_t2 == np.array([False, False, False, False, False, True, True, True, True, True])) t1_gt_t2 = self.t1 > self.t2 assert np.all(t1_gt_t2 != t1_le_t2) t1_eq_t2 = self.t1 == self.t2 assert np.all(t1_eq_t2 == np.array([False, False, False, False, False, True, False, False, False, False])) t1_ne_t2 = self.t1 != self.t2 assert np.all(t1_ne_t2 != t1_eq_t2) t1_0_gt_t2_0 = self.t1[0] > self.t2[0] assert t1_0_gt_t2_0 is True t1_0_gt_t2 = self.t1[0] > self.t2 assert np.all(t1_0_gt_t2 == np.array([True, True, True, True, True, False, False, False, False, False])) t1_gt_t2_0 = self.t1 > self.t2[0] assert np.all(t1_gt_t2_0 == np.array([True, True, True, True, True, True, True, True, True, True])) def test_timedelta(self): dt = self.t2 - self.t1 with pytest.raises(OperandTypeError): self.t1 > dt dt_gt_td0 = dt > TimeDelta(0., format='sec') assert np.all(dt_gt_td0 == np.array([False, False, False, False, False, False, True, True, True, True]))
e17814afa04e7d241d2176d963b724ab81b12bdf4ecec688c99e71d8549d37d4
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import copy import pytest import numpy as np from .. import Time @pytest.fixture(scope="module", params=[True, False]) def masked(request): # Could not figure out a better way to parametrize the setup method global use_masked_data use_masked_data = request.param yield use_masked_data class TestManipulation(): """Manipulation of Time objects, ensuring attributes are done correctly.""" def setup(self): mjd = np.arange(50000, 50010) frac = np.arange(0., 0.999, 0.2) if use_masked_data: frac = np.ma.array(frac) frac[1] = np.ma.masked self.t0 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc') self.t1 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc', location=('45d', '50d')) # Note: location is along last axis only. self.t2 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc', location=(np.arange(len(frac)), np.arange(len(frac)))) def test_ravel(self, masked): t0_ravel = self.t0.ravel() assert t0_ravel.shape == (self.t0.size,) assert np.all(t0_ravel.jd1 == self.t0.jd1.ravel()) assert np.may_share_memory(t0_ravel.jd1, self.t0.jd1) assert t0_ravel.location is None t1_ravel = self.t1.ravel() assert t1_ravel.shape == (self.t1.size,) assert np.all(t1_ravel.jd1 == self.t1.jd1.ravel()) assert np.may_share_memory(t1_ravel.jd1, self.t1.jd1) assert t1_ravel.location is self.t1.location t2_ravel = self.t2.ravel() assert t2_ravel.shape == (self.t2.size,) assert np.all(t2_ravel.jd1 == self.t2.jd1.ravel()) assert np.may_share_memory(t2_ravel.jd1, self.t2.jd1) assert t2_ravel.location.shape == t2_ravel.shape # Broadcasting and ravelling cannot be done without a copy. assert not np.may_share_memory(t2_ravel.location, self.t2.location) def test_flatten(self, masked): t0_flatten = self.t0.flatten() assert t0_flatten.shape == (self.t0.size,) assert t0_flatten.location is None # Flatten always makes a copy. assert not np.may_share_memory(t0_flatten.jd1, self.t0.jd1) t1_flatten = self.t1.flatten() assert t1_flatten.shape == (self.t1.size,) assert not np.may_share_memory(t1_flatten.jd1, self.t1.jd1) assert t1_flatten.location is not self.t1.location assert t1_flatten.location == self.t1.location t2_flatten = self.t2.flatten() assert t2_flatten.shape == (self.t2.size,) assert not np.may_share_memory(t2_flatten.jd1, self.t2.jd1) assert t2_flatten.location.shape == t2_flatten.shape assert not np.may_share_memory(t2_flatten.location, self.t2.location) def test_transpose(self, masked): t0_transpose = self.t0.transpose() assert t0_transpose.shape == (5, 10) assert np.all(t0_transpose.jd1 == self.t0.jd1.transpose()) assert np.may_share_memory(t0_transpose.jd1, self.t0.jd1) assert t0_transpose.location is None t1_transpose = self.t1.transpose() assert t1_transpose.shape == (5, 10) assert np.all(t1_transpose.jd1 == self.t1.jd1.transpose()) assert np.may_share_memory(t1_transpose.jd1, self.t1.jd1) assert t1_transpose.location is self.t1.location t2_transpose = self.t2.transpose() assert t2_transpose.shape == (5, 10) assert np.all(t2_transpose.jd1 == self.t2.jd1.transpose()) assert np.may_share_memory(t2_transpose.jd1, self.t2.jd1) assert t2_transpose.location.shape == t2_transpose.shape assert np.may_share_memory(t2_transpose.location, self.t2.location) # Only one check on T, since it just calls transpose anyway.
t2_T = self.t2.T assert t2_T.shape == (5, 10) assert np.all(t2_T.jd1 == self.t2.jd1.T) assert np.may_share_memory(t2_T.jd1, self.t2.jd1) assert t2_T.location.shape == t2_T.location.shape assert np.may_share_memory(t2_T.location, self.t2.location) def test_diagonal(self, masked): t0_diagonal = self.t0.diagonal() assert t0_diagonal.shape == (5,) assert np.all(t0_diagonal.jd1 == self.t0.jd1.diagonal()) assert t0_diagonal.location is None assert np.may_share_memory(t0_diagonal.jd1, self.t0.jd1) t1_diagonal = self.t1.diagonal() assert t1_diagonal.shape == (5,) assert np.all(t1_diagonal.jd1 == self.t1.jd1.diagonal()) assert t1_diagonal.location is self.t1.location assert np.may_share_memory(t1_diagonal.jd1, self.t1.jd1) t2_diagonal = self.t2.diagonal() assert t2_diagonal.shape == (5,) assert np.all(t2_diagonal.jd1 == self.t2.jd1.diagonal()) assert t2_diagonal.location.shape == t2_diagonal.shape assert np.may_share_memory(t2_diagonal.jd1, self.t2.jd1) assert np.may_share_memory(t2_diagonal.location, self.t2.location) def test_swapaxes(self, masked): t0_swapaxes = self.t0.swapaxes(0, 1) assert t0_swapaxes.shape == (5, 10) assert np.all(t0_swapaxes.jd1 == self.t0.jd1.swapaxes(0, 1)) assert np.may_share_memory(t0_swapaxes.jd1, self.t0.jd1) assert t0_swapaxes.location is None t1_swapaxes = self.t1.swapaxes(0, 1) assert t1_swapaxes.shape == (5, 10) assert np.all(t1_swapaxes.jd1 == self.t1.jd1.swapaxes(0, 1)) assert np.may_share_memory(t1_swapaxes.jd1, self.t1.jd1) assert t1_swapaxes.location is self.t1.location t2_swapaxes = self.t2.swapaxes(0, 1) assert t2_swapaxes.shape == (5, 10) assert np.all(t2_swapaxes.jd1 == self.t2.jd1.swapaxes(0, 1)) assert np.may_share_memory(t2_swapaxes.jd1, self.t2.jd1) assert t2_swapaxes.location.shape == t2_swapaxes.shape assert np.may_share_memory(t2_swapaxes.location, self.t2.location) def test_reshape(self, masked): t0_reshape = self.t0.reshape(5, 2, 5) assert t0_reshape.shape == (5, 2, 5) assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5)) assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5)) assert np.may_share_memory(t0_reshape.jd1, self.t0.jd1) assert np.may_share_memory(t0_reshape.jd2, self.t0.jd2) assert t0_reshape.location is None t1_reshape = self.t1.reshape(2, 5, 5) assert t1_reshape.shape == (2, 5, 5) assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5)) assert np.may_share_memory(t1_reshape.jd1, self.t1.jd1) assert t1_reshape.location is self.t1.location # For reshape(5, 2, 5), the location array can remain the same. t2_reshape = self.t2.reshape(5, 2, 5) assert t2_reshape.shape == (5, 2, 5) assert np.all(t2_reshape.jd1 == self.t2.jd1.reshape(5, 2, 5)) assert np.may_share_memory(t2_reshape.jd1, self.t2.jd1) assert t2_reshape.location.shape == t2_reshape.shape assert np.may_share_memory(t2_reshape.location, self.t2.location) # But for reshape(5, 5, 2), location has to be broadcast and copied. t2_reshape2 = self.t2.reshape(5, 5, 2) assert t2_reshape2.shape == (5, 5, 2) assert np.all(t2_reshape2.jd1 == self.t2.jd1.reshape(5, 5, 2)) assert np.may_share_memory(t2_reshape2.jd1, self.t2.jd1) assert t2_reshape2.location.shape == t2_reshape2.shape assert not np.may_share_memory(t2_reshape2.location, self.t2.location) t2_reshape_t = self.t2.reshape(10, 5).T assert t2_reshape_t.shape == (5, 10) assert np.may_share_memory(t2_reshape_t.jd1, self.t2.jd1) assert t2_reshape_t.location.shape == t2_reshape_t.shape assert np.may_share_memory(t2_reshape_t.location, self.t2.location) # Finally, reshape in a way that cannot be a view. 
        t2_reshape_t_reshape = t2_reshape_t.reshape(10, 5)
        assert t2_reshape_t_reshape.shape == (10, 5)
        assert not np.may_share_memory(t2_reshape_t_reshape.jd1, self.t2.jd1)
        assert (t2_reshape_t_reshape.location.shape ==
                t2_reshape_t_reshape.shape)
        assert not np.may_share_memory(t2_reshape_t_reshape.location,
                                       t2_reshape_t.location)

    def test_shape_setting(self, masked):
        t0_reshape = self.t0.copy()
        mjd = t0_reshape.mjd  # Creates a cache of the mjd attribute
        t0_reshape.shape = (5, 2, 5)
        assert t0_reshape.shape == (5, 2, 5)
        assert mjd.shape != t0_reshape.mjd.shape  # Cache got cleared
        assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
        assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
        assert t0_reshape.location is None
        # But if the shape doesn't work, one should get an error.
        t0_reshape_t = t0_reshape.T
        with pytest.raises(AttributeError):
            t0_reshape_t.shape = (10, 5)
        # check no shape was changed.
        assert t0_reshape_t.shape == t0_reshape.T.shape
        assert t0_reshape_t.jd1.shape == t0_reshape.T.shape
        assert t0_reshape_t.jd2.shape == t0_reshape.T.shape
        t1_reshape = self.t1.copy()
        t1_reshape.shape = (2, 5, 5)
        assert t1_reshape.shape == (2, 5, 5)
        assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
        # location is a single element, so its shape should not change.
        assert t1_reshape.location.shape == ()
        # For reshape(5, 2, 5), the location array can remain the same.
        # Note that we need to work directly on self.t2 here, since any
        # copy would cause location to have the full shape.
        self.t2.shape = (5, 2, 5)
        assert self.t2.shape == (5, 2, 5)
        assert self.t2.jd1.shape == (5, 2, 5)
        assert self.t2.jd2.shape == (5, 2, 5)
        assert self.t2.location.shape == (5, 2, 5)
        assert self.t2.location.strides == (0, 0, 24)
        # But for reshape(50), location would need to be copied, so this
        # should fail.
        oldshape = self.t2.shape
        with pytest.raises(AttributeError):
            self.t2.shape = (50,)
        # check no shape was changed.
        assert self.t2.jd1.shape == oldshape
        assert self.t2.jd2.shape == oldshape
        assert self.t2.location.shape == oldshape
        # reset t2 to its original.
        self.setup()

    def test_squeeze(self, masked):
        t0_squeeze = self.t0.reshape(5, 1, 2, 1, 5).squeeze()
        assert t0_squeeze.shape == (5, 2, 5)
        assert np.all(t0_squeeze.jd1 == self.t0.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t0_squeeze.jd1, self.t0.jd1)
        assert t0_squeeze.location is None
        t1_squeeze = self.t1.reshape(1, 5, 1, 2, 5).squeeze()
        assert t1_squeeze.shape == (5, 2, 5)
        assert np.all(t1_squeeze.jd1 == self.t1.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t1_squeeze.jd1, self.t1.jd1)
        assert t1_squeeze.location is self.t1.location
        t2_squeeze = self.t2.reshape(1, 1, 5, 2, 5, 1, 1).squeeze()
        assert t2_squeeze.shape == (5, 2, 5)
        assert np.all(t2_squeeze.jd1 == self.t2.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t2_squeeze.jd1, self.t2.jd1)
        assert t2_squeeze.location.shape == t2_squeeze.shape
        assert np.may_share_memory(t2_squeeze.location, self.t2.location)

    def test_add_dimension(self, masked):
        t0_adddim = self.t0[:, np.newaxis, :]
        assert t0_adddim.shape == (10, 1, 5)
        assert np.all(t0_adddim.jd1 == self.t0.jd1[:, np.newaxis, :])
        assert np.may_share_memory(t0_adddim.jd1, self.t0.jd1)
        assert t0_adddim.location is None
        t1_adddim = self.t1[:, :, np.newaxis]
        assert t1_adddim.shape == (10, 5, 1)
        assert np.all(t1_adddim.jd1 == self.t1.jd1[:, :, np.newaxis])
        assert np.may_share_memory(t1_adddim.jd1, self.t1.jd1)
        assert t1_adddim.location is self.t1.location
        t2_adddim = self.t2[:, :, np.newaxis]
        assert t2_adddim.shape == (10, 5, 1)
        assert np.all(t2_adddim.jd1 == self.t2.jd1[:, :, np.newaxis])
        assert np.may_share_memory(t2_adddim.jd1, self.t2.jd1)
        assert t2_adddim.location.shape == t2_adddim.shape
        assert np.may_share_memory(t2_adddim.location, self.t2.location)

    def test_take(self, masked):
        t0_take = self.t0.take((5, 2))
        assert t0_take.shape == (2,)
        assert np.all(t0_take.jd1 == self.t0._time.jd1.take((5, 2)))
        assert t0_take.location is None
        t1_take = self.t1.take((2, 4), axis=1)
        assert t1_take.shape == (10, 2)
        assert np.all(t1_take.jd1 == self.t1.jd1.take((2, 4), axis=1))
        assert t1_take.location is self.t1.location
        t2_take = self.t2.take((1, 3, 7), axis=0)
        assert t2_take.shape == (3, 5)
        assert np.all(t2_take.jd1 == self.t2.jd1.take((1, 3, 7), axis=0))
        assert t2_take.location.shape == t2_take.shape
        t2_take2 = self.t2.take((5, 15))
        assert t2_take2.shape == (2,)
        assert np.all(t2_take2.jd1 == self.t2.jd1.take((5, 15)))
        assert t2_take2.location.shape == t2_take2.shape

    def test_broadcast(self, masked):
        """Test using a callable method."""
        t0_broadcast = self.t0._apply(np.broadcast_to, shape=(3, 10, 5))
        assert t0_broadcast.shape == (3, 10, 5)
        assert np.all(t0_broadcast.jd1 == self.t0.jd1)
        assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
        assert t0_broadcast.location is None
        t1_broadcast = self.t1._apply(np.broadcast_to, shape=(3, 10, 5))
        assert t1_broadcast.shape == (3, 10, 5)
        assert np.all(t1_broadcast.jd1 == self.t1.jd1)
        assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
        assert t1_broadcast.location is self.t1.location
        t2_broadcast = self.t2._apply(np.broadcast_to, shape=(3, 10, 5))
        assert t2_broadcast.shape == (3, 10, 5)
        assert np.all(t2_broadcast.jd1 == self.t2.jd1)
        assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
        assert t2_broadcast.location.shape == t2_broadcast.shape
        assert np.may_share_memory(t2_broadcast.location, self.t2.location)


class TestArithmetic():
    """Arithmetic on Time objects, using both doubles."""
    kwargs = ({}, {'axis': None}, {'axis': 0}, {'axis': 1}, {'axis': 2})
    functions = ('min', 'max', 'sort')

    def setup(self):
        mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
        frac = np.array([0.1, 0.1+1.e-15, 0.1-1.e-15, 0.9+2.e-16, 0.9])
        if use_masked_data:
            frac = np.ma.array(frac)
            frac[1] = np.ma.masked
        self.t0 = Time(mjd, frac, format='mjd', scale='utc')

        # Define arrays with same ordinal properties
        frac = np.array([1, 2, 0, 4, 3])
        if use_masked_data:
            frac = np.ma.array(frac)
            frac[1] = np.ma.masked
        self.t1 = Time(mjd + frac, format='mjd', scale='utc')
        self.jd = mjd + frac

    @pytest.mark.parametrize('kw, func',
                             itertools.product(kwargs, functions))
    def test_argfuncs(self, kw, func, masked):
        """
        Test that np.argfunc(jd, **kw) is the same as t0.argfunc(**kw)
        where jd is a similarly shaped array with the same ordinal
        properties but all integer values.  Also test the same for t1
        which has the same integral values as jd.
        """
        t0v = getattr(self.t0, 'arg' + func)(**kw)
        t1v = getattr(self.t1, 'arg' + func)(**kw)
        jdv = getattr(np, 'arg' + func)(self.jd, **kw)

        if self.t0.masked and kw == {'axis': None} and func == 'sort':
            t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
            t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
            jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])

        assert np.all(t0v == jdv)
        assert np.all(t1v == jdv)
        assert t0v.shape == jdv.shape
        assert t1v.shape == jdv.shape

    @pytest.mark.parametrize('kw, func',
                             itertools.product(kwargs, functions))
    def test_funcs(self, kw, func, masked):
        """
        Test that np.func(jd, **kw) is the same as t1.func(**kw) where
        jd is a similarly shaped array and the same integral values.
        """
        t1v = getattr(self.t1, func)(**kw)
        jdv = getattr(np, func)(self.jd, **kw)
        assert np.all(t1v.value == jdv)
        assert t1v.shape == jdv.shape

    def test_argmin(self, masked):
        assert self.t0.argmin() == 2
        assert np.all(self.t0.argmin(axis=0) == 0)
        assert np.all(self.t0.argmin(axis=1) == 0)
        assert np.all(self.t0.argmin(axis=2) == 2)

    def test_argmax(self, masked):
        assert self.t0.argmax() == self.t0.size - 2
        if masked:
            # The 0 is where all entries are masked in that axis
            assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
            assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
        else:
            assert np.all(self.t0.argmax(axis=0) == 1)
            assert np.all(self.t0.argmax(axis=1) == 4)
        assert np.all(self.t0.argmax(axis=2) == 3)

    def test_argsort(self, masked):
        order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
        assert np.all(self.t0.argsort() == np.array(order))
        assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
        assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
        assert np.all(self.t0.argsort(axis=2) == np.array(order))
        ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
        if masked:
            t0v = self.t0.argsort(axis=None)
            # Manually remove elements in ravel that correspond to masked
            # entries in self.t0. This removes the 10 entries that are masked
            # which show up at the end of the list.
            mask = self.t0.mask.ravel()[ravel]
            ravel = ravel[~mask]
            assert np.all(t0v[:-10] == ravel)
        else:
            assert np.all(self.t0.argsort(axis=None) == ravel)

    def test_min(self, masked):
        assert self.t0.min() == self.t0[0, 0, 2]
        assert np.all(self.t0.min(0) == self.t0[0])
        assert np.all(self.t0.min(1) == self.t0[:, 0])
        assert np.all(self.t0.min(2) == self.t0[:, :, 2])
        assert self.t0.min(0).shape == (5, 5)
        assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
        assert self.t0.min(1).shape == (2, 5)
        assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
        assert self.t0.min(2).shape == (2, 5)
        assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)

    def test_max(self, masked):
        assert self.t0.max() == self.t0[-1, -1, -2]
        assert np.all(self.t0.max(0) == self.t0[1])
        assert np.all(self.t0.max(1) == self.t0[:, 4])
        assert np.all(self.t0.max(2) == self.t0[:, :, 3])
        assert self.t0.max(0).shape == (5, 5)
        assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)

    def test_ptp(self, masked):
        assert self.t0.ptp() == self.t0.max() - self.t0.min()
        assert np.all(self.t0.ptp(0) == self.t0.max(0) - self.t0.min(0))
        assert self.t0.ptp(0).shape == (5, 5)
        assert self.t0.ptp(0, keepdims=True).shape == (1, 5, 5)

    def test_sort(self, masked):
        order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
        assert np.all(self.t0.sort() == self.t0[:, :, order])
        assert np.all(self.t0.sort(0) == self.t0)
        assert np.all(self.t0.sort(1) == self.t0)
        assert np.all(self.t0.sort(2) == self.t0[:, :, order])
        if not masked:
            assert np.all(self.t0.sort(None) ==
                          self.t0[:, :, order].ravel())
            # Bit superfluous, but good to check.
            assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
            assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))


def test_regression():
    # For #5225, where a time with a single-element delta_ut1_utc could not
    # be copied, flattened, or ravelled. (For copy, it is in test_basic.)
    t = Time(49580.0, scale='tai', format='mjd')
    t_ut1 = t.ut1
    t_ut1_copy = copy.deepcopy(t_ut1)
    assert type(t_ut1_copy.delta_ut1_utc) is np.ndarray
    t_ut1_flatten = t_ut1.flatten()
    assert type(t_ut1_flatten.delta_ut1_utc) is np.ndarray
    t_ut1_ravel = t_ut1.ravel()
    assert type(t_ut1_ravel.delta_ut1_utc) is np.ndarray
    assert t_ut1_copy.delta_ut1_utc == t_ut1.delta_ut1_utc
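

# Editor's illustrative sketch, not part of the original astropy test suite.
# It restates, in miniature, the behaviour the shape tests above rely on:
# reshaping a Time returns views of jd1/jd2 whenever numpy allows it, and an
# in-place shape change that would require a copy raises instead of silently
# copying.  It assumes the module's existing imports (np, pytest, Time); the
# function name is only for illustration.
def _example_time_reshape_views():
    t = Time(np.arange(50000., 50010.), format='mjd')
    t2 = t.reshape(2, 5)
    # reshape of a contiguous Time shares memory with the parent.
    assert np.may_share_memory(t2.jd1, t.jd1)
    # Setting .shape on a non-contiguous (transposed) Time cannot be done
    # in place, so it raises rather than copying.
    with pytest.raises(AttributeError):
        t2.T.shape = (10,)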
2c6819975a6b0e86c30b2a814ddc952b990d1e2dcb564573b5a738105ae21f46
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import functools

import numpy as np

from ...utils.compat import NUMPY_LT_1_14
from ...tests.helper import pytest
from .. import Time
from ...table import Table

try:
    import h5py  # pylint: disable=W0611
except ImportError:
    HAS_H5PY = False
else:
    HAS_H5PY = True

try:
    import yaml  # pylint: disable=W0611
    HAS_YAML = True
except ImportError:
    HAS_YAML = False

allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
                                 atol=2. ** -52 * 24 * 3600)  # 20 ps atol
is_masked = np.ma.is_masked


def test_simple():
    t = Time([1, 2, 3], format='cxcsec')
    assert t.masked is False
    assert np.all(t.mask == [False, False, False])

    # Before masking, format output is not a masked array (it is an ndarray
    # like always)
    assert not isinstance(t.value, np.ma.MaskedArray)
    assert not isinstance(t.unix, np.ma.MaskedArray)

    t[2] = np.ma.masked
    assert t.masked is True
    assert np.all(t.mask == [False, False, True])
    assert allclose_sec(t.value[:2], [1, 2])
    assert is_masked(t.value[2])
    assert is_masked(t[2].value)

    # After masking format output is a masked array
    assert isinstance(t.value, np.ma.MaskedArray)
    assert isinstance(t.unix, np.ma.MaskedArray)
    # Todo : test all formats


def test_scalar_init():
    t = Time('2000:001')
    assert t.masked is False
    assert t.mask == np.array(False)


def test_mask_not_writeable():
    t = Time('2000:001')
    with pytest.raises(AttributeError) as err:
        t.mask = True
    assert "can't set attribute" in str(err)

    t = Time(['2000:001'])
    with pytest.raises(ValueError) as err:
        t.mask[0] = True
    assert "assignment destination is read-only" in str(err)


def test_str():
    t = Time(['2000:001', '2000:002'])
    t[1] = np.ma.masked

    assert str(t) == "['2000:001:00:00:00.000' --]"
    assert repr(t) == "<Time object: scale='utc' format='yday' value=['2000:001:00:00:00.000' --]>"

    if NUMPY_LT_1_14:
        expected = ["masked_array(data = ['2000-01-01 00:00:00.000' --],",
                    " mask = [False True],",
                    " fill_value = N/A)"]
    else:
        expected = ["masked_array(data=['2000-01-01 00:00:00.000', --],",
                    ' mask=[False, True],',
                    " fill_value='N/A',",
                    " dtype='<U23')"]

    assert repr(t.iso).splitlines() == expected

    # Assign value to unmask
    t[1] = '2000:111'
    assert str(t) == "['2000:001:00:00:00.000' '2000:111:00:00:00.000']"
    assert t.masked is False


def test_transform():
    t = Time(['2000:001', '2000:002'])
    t[1] = np.ma.masked

    # Change scale (this tests the ERFA machinery with masking as well)
    t_ut1 = t.ut1
    assert is_masked(t_ut1.value[1])
    assert not is_masked(t_ut1.value[0])
    assert np.all(t_ut1.mask == [False, True])

    # Change format
    t_unix = t.unix
    assert is_masked(t_unix[1])
    assert not is_masked(t_unix[0])
    assert np.all(t_unix.mask == [False, True])


def test_masked_input():
    v0 = np.ma.MaskedArray([[1, 2], [3, 4]])  # No masked elements
    v1 = np.ma.MaskedArray([[1, 2], [3, 4]],
                           mask=[[True, False], [False, False]])
    v2 = np.ma.MaskedArray([[10, 20], [30, 40]],
                           mask=[[False, False], [False, True]])

    # Init from various combinations of masked arrays
    t = Time(v0, format='cxcsec')
    assert np.ma.allclose(t.value, v0)
    assert np.all(t.mask == [[False, False], [False, False]])
    assert t.masked is False

    t = Time(v1, format='cxcsec')
    assert np.ma.allclose(t.value, v1)
    assert np.all(t.mask == v1.mask)
    assert np.all(t.value.mask == v1.mask)
    assert t.masked is True

    t = Time(v1, v2, format='cxcsec')
    assert np.ma.allclose(t.value, v1 + v2)
    assert np.all(t.mask == (v1 + v2).mask)
    assert t.masked is True

    t = Time(v0, v1, format='cxcsec')
    assert np.ma.allclose(t.value, v0 + v1)
    assert np.all(t.mask == (v0 + v1).mask)
    assert t.masked is True

    t = Time(0, v2, format='cxcsec')
    assert np.ma.allclose(t.value, v2)
    assert np.all(t.mask == v2.mask)
    assert t.masked is True

    # Init from a string masked array
    t_iso = t.iso
    t2 = Time(t_iso)
    assert np.all(t2.value == t_iso)
    assert np.all(t2.mask == v2.mask)
    assert t2.masked is True


def test_serialize_fits_masked(tmpdir):
    tm = Time([1, 2, 3], format='cxcsec')
    tm[1] = np.ma.masked

    fn = str(tmpdir.join('tempfile.fits'))
    t = Table([tm])
    t.write(fn)

    t2 = Table.read(fn, astropy_native=True)

    # Time FITS handling does not currently round-trip format in FITS
    t2['col0'].format = tm.format

    assert t2['col0'].masked
    assert np.all(t2['col0'].mask == [False, True, False])
    assert np.all(t2['col0'].value == t['col0'].value)


@pytest.mark.skipif('not HAS_H5PY')
def test_serialize_hdf5_masked(tmpdir):
    tm = Time([1, 2, 3], format='cxcsec')
    tm[1] = np.ma.masked

    fn = str(tmpdir.join('tempfile.hdf5'))
    t = Table([tm])
    t.write(fn, path='root', serialize_meta=True)
    t2 = Table.read(fn)

    assert t2['col0'].masked
    assert np.all(t2['col0'].mask == [False, True, False])
    assert np.all(t2['col0'].value == t['col0'].value)


@pytest.mark.skipif('not HAS_YAML')
def test_serialize_ecsv_masked(tmpdir):
    tm = Time([1, 2, 3], format='cxcsec')
    tm[1] = np.ma.masked

    # Serializing in the default way for ECSV fails to round-trip
    # because it writes out a "nan" instead of "". But for jd1/jd2
    # this works OK.
    tm.info.serialize_method['ecsv'] = 'jd1_jd2'

    fn = str(tmpdir.join('tempfile.ecsv'))
    t = Table([tm])
    t.write(fn)
    t2 = Table.read(fn)

    assert t2['col0'].masked
    assert np.all(t2['col0'].mask == [False, True, False])
    # Serializing floats to ASCII loses some precision so use allclose
    # and 1e-7 seconds tolerance.
    assert np.allclose(t2['col0'].value, t['col0'].value, rtol=0, atol=1e-7)
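

# Editor's illustrative sketch, not part of the original astropy test suite.
# It condenses the behaviour verified above: masking one element of a Time
# switches value/format output to numpy masked arrays, and the mask follows
# the data into other formats.  It assumes the module's existing imports
# (np, Time); the function name is only for illustration.
def _example_masked_time():
    t = Time([1, 2, 3], format='cxcsec')
    t[1] = np.ma.masked
    assert t.masked is True
    # Once an element is masked, format output becomes a masked array ...
    assert isinstance(t.value, np.ma.MaskedArray)
    # ... and the mask propagates through format conversion.
    assert np.all(t.unix.mask == [False, True, False])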
5fd0912dfe3702c9c191e209d9ceae70de5a976be032ece6057391332cadc0d3
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools

import pytest
import numpy as np

from .. import Time
from ..core import SIDEREAL_TIME_MODELS

allclose_hours = functools.partial(np.allclose, rtol=1e-15, atol=3e-8)
# 0.1 ms atol; IERS-B files change at that level.
within_1_second = functools.partial(np.allclose, rtol=1., atol=1./3600.)
within_2_seconds = functools.partial(np.allclose, rtol=1., atol=2./3600.)


def test_doc_string_contains_models():
    """The doc string is formatted; this ensures this remains working."""
    for kind in ('mean', 'apparent'):
        for model in SIDEREAL_TIME_MODELS[kind]:
            assert model in Time.sidereal_time.__doc__


class TestERFATestCases():
    """Test that we reproduce the test cases given in erfa/src/t_erfa_c.c"""

    # all tests use the following JD inputs
    time_ut1 = Time(2400000.5, 53736.0, scale='ut1', format='jd')
    time_tt = Time(2400000.5, 53736.0, scale='tt', format='jd')
    # but tt!=ut1 at these dates, unlike what is assumed, so we cannot
    # reproduce this exactly. Now it does not really matter,
    # but may as well fake this (and avoid IERS table lookup here)
    time_ut1.delta_ut1_utc = 0.
    time_ut1.delta_ut1_utc = 24*3600*(time_ut1.tt.jd2-time_tt.jd2)
    assert np.allclose(time_ut1.tt.jd2 - time_tt.jd2, 0., atol=1.e-14)

    @pytest.mark.parametrize('erfa_test_input',
                             ((1.754174972210740592, 1e-12, "eraGmst00"),
                              (1.754174971870091203, 1e-12, "eraGmst06"),
                              (1.754174981860675096, 1e-12, "eraGmst82"),
                              (1.754166138018281369, 1e-12, "eraGst00a"),
                              (1.754166136510680589, 1e-12, "eraGst00b"),
                              (1.754166137675019159, 1e-12, "eraGst06a"),
                              (1.754166136020645203, 1e-12, "eraGst94")))
    def test_iau_models(self, erfa_test_input):
        result, precision, name = erfa_test_input
        if name[4] == 'm':
            kind = 'mean'
            model_name = 'IAU{0:2d}{1:s}'.format(20 if name[7] == '0' else 19,
                                                 name[7:])
        else:
            kind = 'apparent'
            model_name = 'IAU{0:2d}{1:s}'.format(20 if name[6] == '0' else 19,
                                                 name[6:].upper())

        assert kind in SIDEREAL_TIME_MODELS.keys()
        assert model_name in SIDEREAL_TIME_MODELS[kind]

        model = SIDEREAL_TIME_MODELS[kind][model_name]
        gst_erfa = self.time_ut1._erfa_sidereal_time(model)
        assert np.allclose(gst_erfa.to_value('radian'), result,
                           rtol=1., atol=precision)

        gst = self.time_ut1.sidereal_time(kind, 'greenwich', model_name)
        assert np.allclose(gst.to_value('radian'), result,
                           rtol=1., atol=precision)


class TestST():
    """Test Greenwich Sidereal Time.  Unlike above, this is relative to what
    was found earlier, so checks changes in implementation, including
    leap seconds, rather than correctness"""

    t1 = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59',
               '2012-06-30 23:59:60', '2012-07-01 00:00:00',
               '2012-07-01 12:00:00'], scale='utc')
    t2 = Time(t1, location=('120d', '10d'))

    def test_gmst(self):
        """Compare Greenwich Mean Sidereal Time with what was found earlier
        """
        gmst_compare = np.array([6.5968497894730564, 18.629426164144697,
                                 18.629704702452862, 18.629983240761003,
                                 6.6628381828899643])
        gmst = self.t1.sidereal_time('mean', 'greenwich')
        assert allclose_hours(gmst.value, gmst_compare)

    def test_gst(self):
        """Compare Greenwich Apparent Sidereal Time with what was found
        earlier
        """
        gst_compare = np.array([6.5971168570494854, 18.629694220878296,
                                18.62997275921186, 18.630251297545389,
                                6.6631074284018244])
        gst = self.t1.sidereal_time('apparent', 'greenwich')
        assert allclose_hours(gst.value, gst_compare)

    def test_gmst_gst_close(self):
        """Check that Mean and Apparent are within a few seconds."""
        gmst = self.t1.sidereal_time('mean', 'greenwich')
        gst = self.t1.sidereal_time('apparent', 'greenwich')
        assert within_2_seconds(gst.value, gmst.value)

    def test_gmst_independent_of_self_location(self):
        """Check that Greenwich time does not depend on self.location"""
        gmst1 = self.t1.sidereal_time('mean', 'greenwich')
        gmst2 = self.t2.sidereal_time('mean', 'greenwich')
        assert allclose_hours(gmst1.value, gmst2.value)

    @pytest.mark.parametrize('kind', ('mean', 'apparent'))
    def test_lst(self, kind):
        """Compare Local Sidereal Time with what was found earlier,
        as well as with what is expected from GMST
        """
        lst_compare = {
            'mean': np.array([14.596849789473058, 2.629426164144693,
                              2.6297047024528588, 2.6299832407610033,
                              14.662838182889967]),
            'apparent': np.array([14.597116857049487, 2.6296942208782959,
                                  2.6299727592118565, 2.6302512975453887,
                                  14.663107428401826])}

        gmst2 = self.t2.sidereal_time(kind, 'greenwich')
        lmst2 = self.t2.sidereal_time(kind)
        assert allclose_hours(lmst2.value, lst_compare[kind])
        assert allclose_hours((lmst2-gmst2).wrap_at('12h').value,
                              self.t2.location.lon.to_value('hourangle'))
        # check it also works when one gives longitude explicitly
        lmst1 = self.t1.sidereal_time(kind, self.t2.location.lon)
        assert allclose_hours(lmst1.value, lst_compare[kind])

    def test_lst_needs_location(self):
        with pytest.raises(ValueError):
            self.t1.sidereal_time('mean')
        with pytest.raises(ValueError):
            self.t1.sidereal_time('mean', None)


class TestModelInterpretation():
    """Check that models are different, and that wrong models are recognized"""
    t = Time(['2012-06-30 12:00:00'], scale='utc', location=('120d', '10d'))

    @pytest.mark.parametrize('kind', ('mean', 'apparent'))
    def test_model_uniqueness(self, kind):
        """Check models give different answers, yet are close."""
        for model1, model2 in itertools.combinations(
                SIDEREAL_TIME_MODELS[kind].keys(), 2):
            gst1 = self.t.sidereal_time(kind, 'greenwich', model1)
            gst2 = self.t.sidereal_time(kind, 'greenwich', model2)
            assert np.all(gst1.value != gst2.value)
            assert within_1_second(gst1.value, gst2.value)
            lst1 = self.t.sidereal_time(kind, None, model1)
            lst2 = self.t.sidereal_time(kind, None, model2)
            assert np.all(lst1.value != lst2.value)
            assert within_1_second(lst1.value, lst2.value)

    @pytest.mark.parametrize(('kind', 'other'), (('mean', 'apparent'),
                                                 ('apparent', 'mean')))
    def test_wrong_models_raise_exceptions(self, kind, other):
        with pytest.raises(ValueError):
            self.t.sidereal_time(kind, 'greenwich', 'nonsense')

        for model in (set(SIDEREAL_TIME_MODELS[other].keys()) -
                      set(SIDEREAL_TIME_MODELS[kind].keys())):
            with pytest.raises(ValueError):
                self.t.sidereal_time(kind, 'greenwich', model)
            with pytest.raises(ValueError):
                self.t.sidereal_time(kind, None, model)
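

# Editor's illustrative sketch, not part of the original astropy test suite.
# It spells out the relation the LST tests above rely on: local sidereal time
# is Greenwich sidereal time plus the observer's east longitude.  It assumes
# the module's existing imports (np, Time); the function name and tolerance
# are only for illustration.
def _example_local_vs_greenwich_sidereal_time():
    t = Time('2012-06-30 12:00:00', scale='utc', location=('120d', '10d'))
    gmst = t.sidereal_time('mean', 'greenwich')
    lmst = t.sidereal_time('mean')  # uses t.location
    # LST - GST should equal the site longitude, here 120 deg east = 8h.
    dlon = (lmst - gmst).wrap_at('12h')
    assert np.allclose(dlon.hourangle, t.location.lon.hourangle, atol=1e-8)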
5c5c2ae05629c3a878e1970356a8e484fb1c6a2cd69cb0997b2428516f95f0a7
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# Note that files generated by lex/yacc are not always fully py 2/3
# compatible.  Hence, the ``clean_parse_tables.py`` tool in the astropy-tools
# (https://github.com/astropy/astropy-tools) repository should be used to fix
# this when/if lextab/parsetab files are re-generated.

"""
Handles the CDS string format for units
"""

import operator
import os
import re

from .base import Base
from . import core, utils
from ..utils import is_effectively_unity
from ...utils import classproperty
from ...utils.misc import did_you_mean


# TODO: Support logarithmic units using bracketed syntax

class CDS(Base):
    """
    Support the `Centre de Données astronomiques de Strasbourg
    <http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
    Catalogues 2.0 <http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_
    format, and the `complete set of supported units
    <http://vizier.u-strasbg.fr/cgi-bin/Unit>`_.  This format is used
    by VOTable up to version 1.2.
    """

    _tokens = (
        'PRODUCT',
        'DIVISION',
        'OPEN_PAREN',
        'CLOSE_PAREN',
        'X',
        'SIGN',
        'UINT',
        'UFLOAT',
        'UNIT'
    )

    @classproperty(lazy=True)
    def _units(cls):
        return cls._generate_unit_names()

    @classproperty(lazy=True)
    def _parser(cls):
        return cls._make_parser()

    @classproperty(lazy=True)
    def _lexer(cls):
        return cls._make_lexer()

    @staticmethod
    def _generate_unit_names():
        from .. import cds
        from ... import units as u

        names = {}

        for key, val in cds.__dict__.items():
            if isinstance(val, u.UnitBase):
                names[key] = val

        return names

    @classmethod
    def _make_lexer(cls):
        from ...extern.ply import lex

        tokens = cls._tokens

        t_PRODUCT = r'\.'
        t_DIVISION = r'/'
        t_OPEN_PAREN = r'\('
        t_CLOSE_PAREN = r'\)'

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens

        def t_UFLOAT(t):
            r'((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?'
            if not re.search(r'[eE\.]', t.value):
                t.type = 'UINT'
                t.value = int(t.value)
            else:
                t.value = float(t.value)
            return t

        def t_UINT(t):
            r'\d+'
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r'[+-](?=\d)'
            t.value = float(t.value + '1')
            return t

        def t_X(t):  # multiplication for factor in front of unit
            r'[x×]'
            return t

        def t_UNIT(t):
            r'\%|°|\\h|((?!\d)\w)+'
            t.value = cls._get_unit(t)
            return t

        t_ignore = ''

        # Error handling rule
        def t_error(t):
            raise ValueError(
                "Invalid character at col {0}".format(t.lexpos))

        lexer = lex.lex(optimize=True, lextab='cds_lextab',
                        outputdir=os.path.dirname(__file__),
                        reflags=re.UNICODE)

        return lexer

    @classmethod
    def _make_parser(cls):
        """
        The grammar here is based on the description in the `Standards
        for Astronomical Catalogues 2.0
        <http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_, which is not
        terribly precise.  The exact grammar here is based on the
        YACC grammar in the `unity library
        <https://bitbucket.org/nxg/unity/>`_.
        """
        from ...extern.ply import yacc

        tokens = cls._tokens

        def p_main(p):
            '''
            main : factor combined_units
                 | combined_units
                 | factor
            '''
            from ..core import Unit
            if len(p) == 3:
                p[0] = Unit(p[1] * p[2])
            else:
                p[0] = Unit(p[1])

        def p_combined_units(p):
            '''
            combined_units : product_of_units
                           | division_of_units
            '''
            p[0] = p[1]

        def p_product_of_units(p):
            '''
            product_of_units : unit_expression PRODUCT combined_units
                             | unit_expression
            '''
            if len(p) == 4:
                p[0] = p[1] * p[3]
            else:
                p[0] = p[1]

        def p_division_of_units(p):
            '''
            division_of_units : DIVISION unit_expression
                              | unit_expression DIVISION combined_units
            '''
            if len(p) == 3:
                p[0] = p[2] ** -1
            else:
                p[0] = p[1] / p[3]

        def p_unit_expression(p):
            '''
            unit_expression : unit_with_power
                            | OPEN_PAREN combined_units CLOSE_PAREN
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = p[2]

        def p_factor(p):
            '''
            factor : signed_float X UINT signed_int
                   | UINT X UINT signed_int
                   | UINT signed_int
                   | UINT
                   | signed_float
            '''
            if len(p) == 5:
                if p[3] != 10:
                    raise ValueError(
                        "Only base ten exponents are allowed in CDS")
                p[0] = p[1] * 10.0 ** p[4]
            elif len(p) == 3:
                if p[1] != 10:
                    raise ValueError(
                        "Only base ten exponents are allowed in CDS")
                p[0] = 10.0 ** p[2]
            elif len(p) == 2:
                p[0] = p[1]

        def p_unit_with_power(p):
            '''
            unit_with_power : UNIT numeric_power
                            | UNIT
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = p[1] ** p[2]

        def p_numeric_power(p):
            '''
            numeric_power : sign UINT
            '''
            p[0] = p[1] * p[2]

        def p_sign(p):
            '''
            sign : SIGN
                 |
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1.0

        def p_signed_int(p):
            '''
            signed_int : SIGN UINT
            '''
            p[0] = p[1] * p[2]

        def p_signed_float(p):
            '''
            signed_float : sign UINT
                         | sign UFLOAT
            '''
            p[0] = p[1] * p[2]

        def p_error(p):
            raise ValueError()

        parser = yacc.yacc(debug=False, tabmodule='cds_parsetab',
                           outputdir=os.path.dirname(__file__),
                           write_tables=True)

        return parser

    @classmethod
    def _get_unit(cls, t):
        try:
            return cls._parse_unit(t.value)
        except ValueError as e:
            raise ValueError(
                "At col {0}, {1}".format(
                    t.lexpos, str(e)))

    @classmethod
    def _parse_unit(cls, unit, detailed_exception=True):
        if unit not in cls._units:
            if detailed_exception:
                raise ValueError(
                    "Unit '{0}' not supported by the CDS SAC "
                    "standard. {1}".format(
                        unit, did_you_mean(
                            unit, cls._units)))
            else:
                raise ValueError()

        return cls._units[unit]

    @classmethod
    def parse(cls, s, debug=False):
        if ' ' in s:
            raise ValueError('CDS unit must not contain whitespace')

        if not isinstance(s, str):
            s = s.decode('ascii')

        # This is a short circuit for the case where the string
        # is just a single unit name
        try:
            return cls._parse_unit(s, detailed_exception=False)
        except ValueError:
            try:
                return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
            except ValueError as e:
                if str(e):
                    raise ValueError(str(e))
                else:
                    raise ValueError("Syntax error")

    @staticmethod
    def _get_unit_name(unit):
        return unit.get_format_name('cds')

    @classmethod
    def _format_unit_list(cls, units):
        out = []
        for base, power in units:
            if power == 1:
                out.append(cls._get_unit_name(base))
            else:
                out.append('{0}{1}'.format(
                    cls._get_unit_name(base), int(power)))
        return '.'.join(out)

    @classmethod
    def to_string(cls, unit):
        # Remove units that aren't known to the format
        unit = utils.decompose_to_known_units(unit, cls._get_unit_name)

        if isinstance(unit, core.CompositeUnit):
            if (unit.physical_type == 'dimensionless' and
                    is_effectively_unity(unit.scale*100.)):
                return '%'

            if unit.scale == 1:
                s = ''
            else:
                m, e = utils.split_mantissa_exponent(unit.scale)
                parts = []
                if m not in ('', '1'):
                    parts.append(m)
                if e:
                    if not e.startswith('-'):
                        e = "+" + e
                    parts.append('10{0}'.format(e))
                s = 'x'.join(parts)

            pairs = list(zip(unit.bases, unit.powers))
            if len(pairs) > 0:
                pairs.sort(key=operator.itemgetter(1), reverse=True)

            s += cls._format_unit_list(pairs)

        elif isinstance(unit, core.NamedUnit):
            s = cls._get_unit_name(unit)

        return s
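

# Editor's illustrative sketch, not part of the original astropy source.
# It shows how the parser and serializer defined above are normally reached
# through the high-level units API; the particular unit strings are only
# examples and are assumed to be present in the CDS unit tables.
def _example_cds_usage():
    from ... import units as u

    # '.' multiplies, '/' divides, and a trailing signed integer is a power.
    assert u.Unit('km/s', format='cds') == u.km / u.s
    assert u.Unit('km.s-1', format='cds') == u.km / u.s

    # A numeric factor in front of a unit must be a power of ten.
    scaled = u.Unit('10+3J', format='cds')
    assert abs(scaled.to(u.J) - 1000.0) < 1e-12

    # Serialization goes the other way through to_string(); composite units
    # are written with '.'-separated powers by _format_unit_list above.
    assert (u.km / u.s).to_string(format='cds') == 'km.s-1'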
20eb0f22fcd50fd85736dd86a3defed7c7ea2d14336c1a20e59a127705caa586
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

_tabversion = '3.8'
_lextokens = set(('UINT', 'WHITESPACE', 'STAR', 'OPEN_PAREN', 'UNIT', 'SIGN', 'UFLOAT', 'STARSTAR', 'DIVISION', 'LIT10', 'CLOSE_PAREN', 'UNKNOWN'))
_lexreflags = 0
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_UFLOAT>(((\\d+\\.?\\d*)|(\\.\\d+))([eE][+-]?\\d+))|(((\\d+\\.\\d*)|(\\.\\d+))([eE][+-]?\\d+)?))|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+-](?=\\d))|(?P<t_X>[x×])|(?P<t_LIT10>10)|(?P<t_UNKNOWN>[Uu][Nn][Kk][Nn][Oo][Ww][Nn])|(?P<t_UNIT>[a-zA-Z][a-zA-Z_]*)|(?P<t_WHITESPACE>[ \t]+)|(?P<t_STARSTAR>\\*\\*)|(?P<t_CLOSE_PAREN>\\))|(?P<t_OPEN_PAREN>\\()|(?P<t_STAR>\\*)|(?P<t_DIVISION>/)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, None, None, None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_X', 'X'), ('t_LIT10', 'LIT10'), ('t_UNKNOWN', 'UNKNOWN'), ('t_UNIT', 'UNIT'), (None, 'WHITESPACE'), (None, 'STARSTAR'), (None, 'CLOSE_PAREN'), (None, 'OPEN_PAREN'), (None, 'STAR'), (None, 'DIVISION')])]}
_lexstateignore = {'INITIAL': ''}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}