hash (string, length 64) | content (string, length 0 to 1.51M)
---|---|
4dcaefc2d827635b659c0b06735ab29a4a2d01d174afd2489539a07d8fcf8ceb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module's main purpose is to act as a script to create new versions
of ufunc.c when ERFA is updated (or this generator is enhanced).
`Jinja2 <http://jinja.pocoo.org/>`_ must be installed for this
module/script to function.
Note that this does *not* currently automate the process of creating structs
or dtypes for those structs. They should be added manually in the template file.
"""
# note that we do *not* use unicode_literals here, because that makes the
# generated code's strings have u'' in them on py 2.x
import re
import os.path
from collections import OrderedDict
DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0],
'../../cextern/erfa')
DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0]
NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')'))
class FunctionDoc:
def __init__(self, doc):
self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "")
self.__input = None
self.__output = None
self.__ret_info = None
def _get_arg_doc_list(self, doc_lines):
"""Parse input/output doc section lines, getting arguments from them.
Ensure all elements of eraASTROM and eraLDBODY are left out, as those
are not input or output arguments themselves. Also remove the nb
argument in front of eraLDBODY, as we infer nb from the python array.
"""
doc_list = []
skip = []
for d in doc_lines:
arg_doc = ArgumentDoc(d)
if arg_doc.name is not None:
if skip:
if skip[0] == arg_doc.name:
skip.pop(0)
continue
else:
raise RuntimeError("We whould be skipping {} "
"but {} encountered."
.format(skip[0], arg_doc.name))
if arg_doc.type.startswith('eraLDBODY'):
# Special-case LDBODY: for those, the previous argument
# is always the number of bodies, but we don't need it
# as an input argument for the ufunc since we're going
# to determine this from the array itself. Also skip
# the description of its contents; those are not arguments.
doc_list.pop()
skip = ['bm', 'dl', 'pv']
elif arg_doc.type.startswith('eraASTROM'):
# Special-case ASTROM: need to skip the description
# of its contents; those are not arguments.
skip = ['pmt', 'eb', 'eh', 'em', 'v', 'bm1',
'bpn', 'along', 'xpl', 'ypl', 'sphi',
'cphi', 'diurab', 'eral', 'refa', 'refb']
doc_list.append(arg_doc)
return doc_list
@property
def input(self):
if self.__input is None:
self.__input = []
for regex in ("Given([^\n]*):\n(.+?) \n",
"Given and returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__input += self._get_arg_doc_list(doc_lines)
return self.__input
@property
def output(self):
if self.__output is None:
self.__output = []
for regex in ("Given and returned([^\n]*):\n(.+?) \n",
"Returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__output += self._get_arg_doc_list(doc_lines)
return self.__output
@property
def ret_info(self):
if self.__ret_info is None:
ret_info = []
result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) \n", self.doc, re.DOTALL)
if result is not None:
ret_info.append(ReturnDoc(result.group(2)))
if len(ret_info) == 0:
self.__ret_info = ''
elif len(ret_info) == 1:
self.__ret_info = ret_info[0]
else:
raise ValueError("Multiple C return sections found in this doc:\n" + self.doc)
return self.__ret_info
def __repr__(self):
return self.doc.replace(" \n", "\n")
class ArgumentDoc:
def __init__(self, doc):
match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc)
if match is not None:
self.name = match.group(1)
self.type = match.group(2)
self.doc = match.group(3)
else:
self.name = None
self.type = None
self.doc = None
def __repr__(self):
return " {0:15} {1:15} {2}".format(self.name, self.type, self.doc)
class Variable:
"""Properties shared by Argument and Return."""
@property
def npy_type(self):
"""Predefined type used by numpy ufuncs to indicate a given ctype.
E.g., NPY_DOUBLE for double.
"""
return "NPY_" + self.ctype.upper()
@property
def dtype(self):
"""Name of dtype corresponding to the ctype.
Specifically,
double       : dt_double
int          : dt_int
double[3]    : dt_double (handled as gufunc core dimension d3)
double[2][3] : dt_pv
double[2]    : dt_pvdpv
double[3][3] : dt_double (handled as gufunc core dimensions d3, d3)
int[4]       : dt_ymdf | dt_hmsf | dt_dmsf, depending on name
eraASTROM    : dt_eraASTROM
eraLDBODY    : dt_eraLDBODY
char         : dt_sign
char[]       : dt_type
The corresponding dtypes are defined in ufunc.c, where they are
used for the loop definitions. In core.py, they are also used
to view-cast regular arrays to these structured dtypes.
"""
if self.ctype == 'const char':
return 'dt_type'
elif self.ctype == 'char':
return 'dt_sign'
elif self.ctype == 'int' and self.shape == (4,):
return 'dt_' + self.name[1:]
elif self.ctype == 'double' and self.shape == (3,):
return 'dt_double'
elif self.ctype == 'double' and self.shape == (2, 3):
return 'dt_pv'
elif self.ctype == 'double' and self.shape == (2,):
return 'dt_pvdpv'
elif self.ctype == 'double' and self.shape == (3, 3):
return 'dt_double'
elif not self.shape:
return 'dt_' + self.ctype
else:
raise ValueError("ctype {} with shape {} not recognized."
.format(self.ctype, self.shape))
@property
def view_dtype(self):
"""Name of dtype corresponding to the ctype for viewing back as array.
E.g., dt_double for double, dt_double33 for double[3][3].
The types are defined in core.py, where they are used for view-casts
of structured results as regular arrays.
"""
if self.ctype == 'const char':
return 'dt_bytes12'
elif self.ctype == 'char':
return 'dt_bytes1'
else:
raise ValueError('Only char ctype should need view back!')
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
size = 1
for s in self.shape:
size *= s
return size
@property
def cshape(self):
return ''.join(['[{0}]'.format(s) for s in self.shape])
@property
def signature_shape(self):
if self.ctype == 'eraLDBODY':
return '(n)'
elif self.ctype == 'double' and self.shape == (3,):
return '(d3)'
elif self.ctype == 'double' and self.shape == (3, 3):
return '(d3, d3)'
else:
return '()'
class Argument(Variable):
def __init__(self, definition, doc):
self.definition = definition
self.doc = doc
self.__inout_state = None
self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1)
if "*" == ptr_name_arr[0]:
self.is_ptr = True
name_arr = ptr_name_arr[1:]
else:
self.is_ptr = False
name_arr = ptr_name_arr
if "[]" in ptr_name_arr:
self.is_ptr = True
name_arr = name_arr[:-2]
if "[" in name_arr:
self.name, arr = name_arr.split("[", 1)
self.shape = tuple([int(size) for size in arr[:-1].split("][")])
else:
self.name = name_arr
self.shape = ()
@property
def inout_state(self):
if self.__inout_state is None:
self.__inout_state = ''
for i in self.doc.input:
if self.name in i.name.split(','):
self.__inout_state = 'in'
for o in self.doc.output:
if self.name in o.name.split(','):
if self.__inout_state == 'in':
self.__inout_state = 'inout'
else:
self.__inout_state = 'out'
return self.__inout_state
@property
def name_for_call(self):
"""How the argument should be used in the call to the ERFA function.
This takes care of ensuring that inputs are passed by value,
as well as adding back the number of bodies for any LDBODY argument.
The latter presumes that in the ufunc inner loops, that number is
called 'nb'.
"""
if self.ctype == 'eraLDBODY':
assert self.name == 'b'
return 'nb, _' + self.name
elif self.is_ptr:
return '_'+self.name
else:
return '*_'+self.name
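# Sketch of the mapping (hypothetical arguments): a double* input named
# 'x' yields '_x' (the pointer is passed through), a plain double named
# 'x' yields '*_x' (dereferenced so the value is passed), and an
# eraLDBODY array named 'b' yields 'nb, _b' (the body count prepended).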
def __repr__(self):
return "Argument('{0}', name='{1}', ctype='{2}', inout_state='{3}')".format(self.definition, self.name, self.ctype, self.inout_state)
class ReturnDoc:
def __init__(self, doc):
self.doc = doc
self.infoline = doc.split('\n')[0].strip()
self.type = self.infoline.split()[0]
self.descr = self.infoline.split()[1]
if self.descr.startswith('status'):
self.statuscodes = statuscodes = {}
code = None
for line in doc[doc.index(':')+1:].split('\n'):
ls = line.strip()
if ls != '':
if ' = ' in ls:
code, msg = ls.split(' = ')
if code != 'else':
code = int(code)
statuscodes[code] = msg
elif code is not None:
statuscodes[code] += ls
else:
self.statuscodes = None
def __repr__(self):
return "Return value, type={0:15}, {1}, {2}".format(self.type, self.descr, self.doc)
class Return(Variable):
def __init__(self, ctype, doc):
self.name = 'c_retval'
self.inout_state = 'stat' if ctype == 'int' else 'ret'
self.ctype = ctype
self.shape = ()
self.doc = doc
def __repr__(self):
return "Return(name='{0}', ctype='{1}', inout_state='{2}')".format(self.name, self.ctype, self.inout_state)
@property
def doc_info(self):
return self.doc.ret_info
class Function:
"""
A class representing a C function.
Parameters
----------
name : str
The name of the function
source_path : str
Either a directory, which means look for the function in a
stand-alone file (like for the standard ERFA distribution), or a
file, which means look for the function in that file (as for the
astropy-packaged single-file erfa.c).
match_line : str, optional
If given, searching of the source file will skip until it finds
a line matching this string, and start from there.
"""
def __init__(self, name, source_path, match_line=None):
self.name = name
self.pyname = name.split('era')[-1].lower()
self.filename = self.pyname+".c"
if os.path.isdir(source_path):
self.filepath = os.path.join(os.path.normpath(source_path), self.filename)
else:
self.filepath = source_path
with open(self.filepath) as f:
if match_line:
line = f.readline()
while line != '':
if line.startswith(match_line):
filecontents = '\n' + line + f.read()
break
line = f.readline()
else:
msg = ('Could not find the match_line "{0}" in '
'the source file "{1}"')
raise ValueError(msg.format(match_line, self.filepath))
else:
filecontents = f.read()
pattern = r"\n([^\n]+{0} ?\([^)]+\)).+?(/\*.+?\*/)".format(name)
p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE)
search = p.search(filecontents)
self.cfunc = " ".join(search.group(1).split())
self.doc = FunctionDoc(search.group(2))
self.args = []
for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.search("^(.*){0}".format(name), self.cfunc).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def args_by_inout(self, inout_filter, prop=None, join=None):
"""
Gives all of the arguments and/or returned values, depending on whether
they are inputs, outputs, etc.
The value for `inout_filter` should be a string containing anything
that arguments' `inout_state` attribute produces. Currently, that can be:
* "in" : input
* "out" : output
* "inout" : something that's could be input or output (e.g. a struct)
* "ret" : the return value of the C function
* "stat" : the return value of the C function if it is a status code
It can also be a "|"-separated string giving inout states to OR
together.
"""
result = []
for arg in self.args:
if arg.inout_state in inout_filter.split('|'):
if prop is None:
result.append(arg)
else:
result.append(getattr(arg, prop))
if join is not None:
return join.join(result)
else:
return result
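# Usage sketch (for a hypothetical Function instance `func`): listing the
# names of all input-like arguments as one string,
#
#     func.args_by_inout('in|inout', prop='name', join=', ')
#
# might return e.g. 'date1, date2' for a two-input ERFA function.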
@property
def user_dtype(self):
"""The non-standard dtype, if any, needed by this function's ufunc.
This would be any structured array for any input or output, but
we give preference to LDBODY, since that also decides that the ufunc
should be a generalized ufunc.
"""
user_dtype = None
for arg in self.args_by_inout('in|inout|out'):
if arg.ctype == 'eraLDBODY':
return arg.dtype
elif user_dtype is None and arg.dtype not in ('dt_double',
'dt_int'):
user_dtype = arg.dtype
return user_dtype
@property
def signature(self):
"""Possible signature, if this function should be a gufunc."""
if all(arg.signature_shape == '()'
for arg in self.args_by_inout('in|inout|out')):
return None
return '->'.join(
[','.join([arg.signature_shape for arg in args])
for args in (self.args_by_inout('in|inout'),
self.args_by_inout('inout|out|ret|stat'))])
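# Sketch: for a function taking a double[3] vector and returning a
# double[3] vector, the gufunc signature built here would be
# '(d3)->(d3)'; purely scalar functions return None instead.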
def _d3_fix_arg_and_index(self):
if not any('d3' in arg.signature_shape
for arg in self.args_by_inout('in|inout')):
for j, arg in enumerate(self.args_by_inout('out')):
if 'd3' in arg.signature_shape:
return j, arg
return None, None
@property
def d3_fix_op_index(self):
"""Whether only output arguments have a d3 dimension."""
index = self._d3_fix_arg_and_index()[0]
if index is not None:
len_in = len(list(self.args_by_inout('in')))
len_inout = len(list(self.args_by_inout('inout')))
index += len_in + 2 * len_inout
return index
@property
def d3_fix_arg(self):
"""Whether only output arguments have a d3 dimension."""
return self._d3_fix_arg_and_index()[1]
@property
def python_call(self):
outnames = [arg.name for arg in self.args_by_inout('inout|out|stat|ret')]
argnames = [arg.name for arg in self.args_by_inout('in|inout')]
argnames += [arg.name for arg in self.args_by_inout('inout')]
d3fix_index = self._d3_fix_arg_and_index()[0]
if d3fix_index is not None:
argnames += ['None'] * d3fix_index + [self.d3_fix_arg.name]
return '{out} = {func}({args})'.format(out=', '.join(outnames),
func='ufunc.' + self.pyname,
args=', '.join(argnames))
def __repr__(self):
return "Function(name='{0}', pyname='{1}', filename='{2}', filepath='{3}')".format(self.name, self.pyname, self.filename, self.filepath)
class Constant:
def __init__(self, name, value, doc):
self.name = name.replace("ERFA_", "")
self.value = value.replace("ERFA_", "")
self.doc = doc
class ExtraFunction(Function):
"""
An "extra" function - e.g. one not following the SOFA/ERFA standard format.
Parameters
----------
cname : str
The name of the function in C
prototype : str
The prototype for the function (usually derived from the header)
pathfordoc : str
The path to a file that contains the prototype, with the documentation
as a multiline string *before* it.
"""
def __init__(self, cname, prototype, pathfordoc):
self.name = cname
self.pyname = cname.split('era')[-1].lower()
self.filepath, self.filename = os.path.split(pathfordoc)
self.prototype = prototype.strip()
if prototype.endswith('{') or prototype.endswith(';'):
self.prototype = prototype[:-1].strip()
incomment = False
lastcomment = None
with open(pathfordoc, 'r') as f:
for l in f:
if incomment:
if l.lstrip().startswith('*/'):
incomment = False
lastcomment = ''.join(lastcomment)
else:
if l.startswith('**'):
l = l[2:]
lastcomment.append(l)
else:
if l.lstrip().startswith('/*'):
incomment = True
lastcomment = []
if l.startswith(self.prototype):
self.doc = lastcomment
break
else:
raise ValueError('Did not find prototype {} in file '
'{}'.format(self.prototype, pathfordoc))
self.args = []
argset = re.search(r"{0}\(([^)]+)?\)".format(self.name),
self.prototype).group(1)
if argset is not None:
for arg in argset.split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.match("^(.*){0}".format(self.name),
self.prototype).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def __repr__(self):
r = super().__repr__()
if r.startswith('Function'):
r = 'Extra' + r
return r
def main(srcdir=DEFAULT_ERFA_LOC, outfn='core.py', ufuncfn='ufunc.c',
templateloc=DEFAULT_TEMPLATE_LOC, extra='erfa_additions.h',
verbose=True):
from jinja2 import Environment, FileSystemLoader
if verbose:
print_ = lambda *args, **kwargs: print(*args, **kwargs)
else:
print_ = lambda *args, **kwargs: None
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(templateloc))
def prefix(a_list, pre):
return [pre+'{0}'.format(an_element) for an_element in a_list]
def postfix(a_list, post):
return ['{0}'.format(an_element)+post for an_element in a_list]
def surround(a_list, pre, post):
return [pre+'{0}'.format(an_element)+post for an_element in a_list]
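# Sketch of what these jinja2 filters do, for a hypothetical list:
# prefix(['a', 'b'], '_')        -> ['_a', '_b']
# postfix(['a', 'b'], '_c')      -> ['a_c', 'b_c']
# surround(['a', 'b'], '(', ')') -> ['(a)', '(b)']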
env.filters['prefix'] = prefix
env.filters['postfix'] = postfix
env.filters['surround'] = surround
erfa_c_in = env.get_template(ufuncfn + '.templ')
erfa_py_in = env.get_template(outfn + '.templ')
# Extract all the ERFA function names from erfa.h
if os.path.isdir(srcdir):
erfahfn = os.path.join(srcdir, 'erfa.h')
multifilesrc = True
else:
erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h')
multifilesrc = False
with open(erfahfn, "r") as f:
erfa_h = f.read()
print_("read erfa header")
if extra:
with open(os.path.join(templateloc or '.', extra), "r") as f:
erfa_h += f.read()
print_("read extra header")
funcs = OrderedDict()
section_subsection_functions = re.findall(
r'/\* (\w*)/(\w*) \*/\n(.*?)\n\n', erfa_h,
flags=re.DOTALL | re.MULTILINE)
for section, subsection, functions in section_subsection_functions:
print_("{0}.{1}".format(section, subsection))
# Right now, we compile everything, but one could be more selective.
# In particular, at the time of writing (2018-06-11), what was
# actually required for astropy was not quite everything, but:
# ((section == 'Extra')
# or (section == "Astronomy")
# or (subsection == "AngleOps")
# or (subsection == "SphericalCartesian")
# or (subsection == "MatrixVectorProducts")
# or (subsection == 'VectorOps'))
if True:
func_names = re.findall(r' (\w+)\(.*?\);', functions,
flags=re.DOTALL)
for name in func_names:
print_("{0}.{1}.{2}...".format(section, subsection, name))
if multifilesrc:
# easy because it just looks in the file itself
cdir = (srcdir if section != 'Extra' else
templateloc or '.')
funcs[name] = Function(name, cdir)
else:
# Have to tell it to look for a declaration matching
# the start of the header declaration, otherwise it
# might find a *call* of the function instead of the
# definition
for line in functions.split('\n'):
if name in line:
# [:-1] is to remove trailing semicolon, and
# splitting on '(' is because the header and
# C files don't necessarily have to match
# argument names and line-breaking or
# whitespace
match_line = line[:-1].split('(')[0]
funcs[name] = Function(name, srcdir, match_line)
break
else:
raise ValueError("A name for a C file wasn't "
"found in the string that "
"spawned it. This should be "
"impossible!")
funcs = funcs.values()
# Extract all the ERFA constants from erfam.h
erfamhfn = os.path.join(srcdir, 'erfam.h')
with open(erfamhfn, 'r') as f:
erfa_m_h = f.read()
constants = []
for chunk in erfa_m_h.split("\n\n"):
result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk,
flags=re.DOTALL | re.MULTILINE)
if result:
doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL)
for (name, value) in result:
constants.append(Constant(name, value, doc))
# TODO: re-enable this when const char* return values and
# non-status code integer rets are possible
# #Add in any "extra" functions from erfaextra.h
# erfaextrahfn = os.path.join(srcdir, 'erfaextra.h')
# with open(erfaextrahfn, 'r') as f:
# for l in f:
# ls = l.strip()
# match = re.match('.* (era.*)\(', ls)
# if match:
# print_("Extra: {0} ...".format(match.group(1)))
# funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn))
print_("Rendering template")
erfa_c = erfa_c_in.render(funcs=funcs)
erfa_py = erfa_py_in.render(funcs=funcs, constants=constants)
if outfn is not None:
print_("Saving to", outfn, 'and', ufuncfn)
with open(os.path.join(templateloc, outfn), "w") as f:
f.write(erfa_py)
with open(os.path.join(templateloc, ufuncfn), "w") as f:
f.write(erfa_c)
print_("Done!")
return erfa_c, erfa_py, funcs
if __name__ == '__main__':
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?',
help='Directory where the ERFA C and header files '
'can be found, or the path to a single erfa.c '
'file (which must be in the same directory as '
'erfa.h). Defaults to the builtin astropy '
'erfa: "{0}"'.format(DEFAULT_ERFA_LOC))
ap.add_argument('-o', '--output', default='core.py',
help='The output filename for the pure-python output.')
ap.add_argument('-u', '--ufunc', default='ufunc.c',
help='The output filename for the ufunc .c output')
ap.add_argument('-t', '--template-loc',
default=DEFAULT_TEMPLATE_LOC,
help='the location where the "core.py.templ" and '
'"ufunc.c.templ" templates can be found.')
ap.add_argument('-x', '--extra',
default='erfa_additions.h',
help='header file for any extra files in the template '
'location that should be included.')
ap.add_argument('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output normally printed to stdout.')
args = ap.parse_args()
main(args.srcdir, args.output, args.ufunc, args.template_loc,
args.extra, args.verbose)
|
d5acab1ebdbad9d14bfdbbf869be6dd41bed4743dce35f8f4cf77dcba6fe628b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import glob
from distutils import log
from distutils.extension import Extension
from astropy_helpers import setup_helpers
from astropy_helpers.version_helpers import get_pkg_version_module
ERFAPKGDIR = os.path.relpath(os.path.dirname(__file__))
ERFA_SRC = os.path.abspath(os.path.join(ERFAPKGDIR, '..', '..',
'cextern', 'erfa'))
SRC_FILES = glob.glob(os.path.join(ERFA_SRC, '*'))
SRC_FILES += [os.path.join(ERFAPKGDIR, filename)
for filename in ['pav2pv.c', 'pv2pav.c', 'erfa_additions.h',
'ufunc.c.templ', 'core.py.templ',
'erfa_generator.py']]
GEN_FILES = [os.path.join(ERFAPKGDIR, 'core.py'),
os.path.join(ERFAPKGDIR, 'ufunc.c')]
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
# Generating the ERFA wrappers should only be done if needed. This also
# ensures that it is not done for any release tarball since those will
# include core.py and ufunc.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
erfa_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = get_pkg_version_module('astropy')
if gen_mtime > erfa_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in astropy._erfa '
'seem to be older than the source templates used to '
'create them. Because this is a release version we will '
'use them anyway, but this might be a sign of some sort '
'of version mismatch or other tampering. Or it might just '
'mean you moved some files around or otherwise '
'accidentally changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"ERFA core.py and ufunc.c files will be used")
return
name = 'erfa_generator'
filename = os.path.join(ERFAPKGDIR, 'erfa_generator.py')
try:
from importlib import machinery as import_machinery
loader = import_machinery.SourceFileLoader(name, filename)
gen = loader.load_module()
except ImportError:
import imp
gen = imp.load_source(name, filename)
gen.main(verbose=False)
def get_extensions():
sources = [os.path.join(ERFAPKGDIR, fn)
for fn in ("ufunc.c", "pav2pv.c", "pv2pav.c")]
include_dirs = ['numpy']
libraries = []
if setup_helpers.use_system_library('erfa'):
libraries.append('erfa')
else:
# get all of the .c files in the cextern/erfa directory
erfafns = os.listdir(ERFA_SRC)
sources.extend(['cextern/erfa/' + fn
for fn in erfafns if fn.endswith('.c')])
include_dirs.append('cextern/erfa')
erfa_ext = Extension(
name="astropy._erfa.ufunc",
sources=sources,
include_dirs=include_dirs,
libraries=libraries,
language="c",)
return [erfa_ext]
def get_external_libraries():
return ['erfa']
|
a0fd9b239b1111b5893ec4f08aa9db90f68485603b577a77479e3bc91b2cad72 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
astropy.wcs-specific utilities for generating boilerplate in docstrings.
"""
__all__ = ['TWO_OR_MORE_ARGS', 'RETURNS', 'ORIGIN', 'RA_DEC_ORDER']
def _fix(content, indent=0):
lines = content.split('\n')
indent = '\n' + ' ' * indent
return indent.join(lines)
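# Sketch: _fix('result : array\n    The value.', 4) re-joins the lines
# with '\n' plus four spaces, so the fragment stays aligned when it is
# substituted into a docstring at that indentation level.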
def TWO_OR_MORE_ARGS(naxis, indent=0):
return _fix(
"""args : flexible
There are two accepted forms for the positional arguments:
- 2 arguments: An *N* x *{0}* array of coordinates, and an
*origin*.
- more than 2 arguments: An array for each axis, followed by
an *origin*. These arrays must be broadcastable to one
another.
Here, *origin* is the coordinate in the upper left corner of the
image. In FITS and Fortran standards, this is 1. In Numpy and C
standards this is 0.
""".format(naxis), indent)
def RETURNS(out_type, indent=0):
return _fix("""result : array
Returns the {0}. If the input was a single array and
origin, a single array is returned, otherwise a tuple of arrays is
returned.""".format(out_type), indent)
def ORIGIN(indent=0):
return _fix(
"""
origin : int
Specifies the origin of pixel values. The Fortran and FITS
standards use an origin of 1. Numpy and C use array indexing with
origin at 0.
""", indent)
def RA_DEC_ORDER(indent=0):
return _fix(
"""
ra_dec_order : bool, optional
When `True`, ensures that world coordinates are always given
and returned as (*ra*, *dec*) pairs, regardless of the order of
the axes specified by the ``CTYPE`` keywords. Default is
`False`.
""", indent)
|
b21f8aa38dbdca7d92a4cbeda97729e50f44925ef2382fc5cb141cc838bd4c50 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
CONTACT = "Michael Droettboom"
EMAIL = "[email protected]"
import io
from os.path import join
import os.path
import shutil
import sys
from distutils.core import Extension
from distutils.dep_util import newer_group
from astropy_helpers import setup_helpers
from astropy_helpers.distutils_helpers import get_distutils_build_option
WCSROOT = os.path.relpath(os.path.dirname(__file__))
WCSVERSION = "5.19.1"
def b(s):
return s.encode('ascii')
def string_escape(s):
s = s.decode('ascii').encode('ascii', 'backslashreplace')
s = s.replace(b'\n', b'\\n')
s = s.replace(b'\0', b'\\0')
return s.decode('ascii')
def determine_64_bit_int():
"""
The only configuration parameter needed at compile-time is how to
specify a 64-bit signed integer. Python's ctypes module can get us
that information.
If we can't be absolutely certain, we default to "long long int",
which is correct on most platforms (x86, x86_64). If we find
platforms where this heuristic doesn't work, we may need to
hardcode for them.
"""
try:
try:
import ctypes
except ImportError:
raise ValueError()
if ctypes.sizeof(ctypes.c_longlong) == 8:
return "long long int"
elif ctypes.sizeof(ctypes.c_long) == 8:
return "long int"
elif ctypes.sizeof(ctypes.c_int) == 8:
return "int"
else:
raise ValueError()
except ValueError:
return "long long int"
def write_wcsconfig_h(paths):
"""
Writes out the wcsconfig.h header with local configuration.
"""
h_file = io.StringIO()
h_file.write("""
/* The bundled version has WCSLIB_VERSION */
#define HAVE_WCSLIB_VERSION 1
/* WCSLIB library version number. */
#define WCSLIB_VERSION {0}
/* 64-bit integer data type. */
#define WCSLIB_INT64 {1}
/* Windows needs some other defines to prevent inclusion of wcsset()
which conflicts with wcslib's wcsset(). These need to be set
on code that *uses* astropy.wcs, in addition to astropy.wcs itself.
*/
#if defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) || defined (__MINGW64__)
#ifndef YY_NO_UNISTD_H
#define YY_NO_UNISTD_H
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#ifndef _NO_OLDNAMES
#define _NO_OLDNAMES
#endif
#ifndef NO_OLDNAMES
#define NO_OLDNAMES
#endif
#ifndef __STDC__
#define __STDC__ 1
#endif
#endif
""".format(WCSVERSION, determine_64_bit_int()))
content = h_file.getvalue().encode('ascii')
for path in paths:
setup_helpers.write_if_different(path, content)
######################################################################
# GENERATE DOCSTRINGS IN C
def generate_c_docstrings():
from astropy.wcs import docstrings
docstrings = docstrings.__dict__
keys = [
key for key, val in docstrings.items()
if not key.startswith('__') and isinstance(val, str)]
keys.sort()
docs = {}
for key in keys:
docs[key] = docstrings[key].encode('utf8').lstrip() + b'\0'
h_file = io.StringIO()
h_file.write("""/*
DO NOT EDIT!
This file is autogenerated by astropy/wcs/setup_package.py. To edit
its contents, edit astropy/wcs/docstrings.py
*/
#ifndef __DOCSTRINGS_H__
#define __DOCSTRINGS_H__
""")
for key in keys:
val = docs[key]
h_file.write('extern char doc_{0}[{1}];\n'.format(key, len(val)))
h_file.write("\n#endif\n\n")
setup_helpers.write_if_different(
join(WCSROOT, 'include', 'astropy_wcs', 'docstrings.h'),
h_file.getvalue().encode('utf-8'))
c_file = io.StringIO()
c_file.write("""/*
DO NOT EDIT!
This file is autogenerated by astropy/wcs/setup_package.py. To edit
its contents, edit astropy/wcs/docstrings.py
The weirdness here with strncpy is because some C compilers, notably
MSVC, do not support string literals greater than 256 characters.
*/
#include <string.h>
#include "astropy_wcs/docstrings.h"
""")
for key in keys:
val = docs[key]
c_file.write('char doc_{0}[{1}] = {{\n'.format(key, len(val)))
for i in range(0, len(val), 12):
section = val[i:i+12]
c_file.write(' ')
c_file.write(''.join('0x{0:02x}, '.format(x) for x in section))
c_file.write('\n')
c_file.write(" };\n\n")
setup_helpers.write_if_different(
join(WCSROOT, 'src', 'docstrings.c'),
c_file.getvalue().encode('utf-8'))
def get_wcslib_cfg(cfg, wcslib_files, include_paths):
from astropy.version import debug
cfg['include_dirs'].append('numpy')
cfg['define_macros'].extend([
('ECHO', None),
('WCSTRIG_MACRO', None),
('ASTROPY_WCS_BUILD', None),
('_GNU_SOURCE', None)])
if (not setup_helpers.use_system_library('wcslib') or
sys.platform == 'win32'):
write_wcsconfig_h(include_paths)
wcslib_path = join("cextern", "wcslib") # Path to wcslib
wcslib_cpath = join(wcslib_path, "C") # Path to wcslib source files
cfg['sources'].extend(join(wcslib_cpath, x) for x in wcslib_files)
cfg['include_dirs'].append(wcslib_cpath)
else:
wcsconfig_h_path = join(WCSROOT, 'include', 'wcsconfig.h')
if os.path.exists(wcsconfig_h_path):
os.unlink(wcsconfig_h_path)
cfg.update(setup_helpers.pkg_config(['wcslib'], ['wcs']))
if debug:
cfg['define_macros'].append(('DEBUG', None))
cfg['undef_macros'].append('NDEBUG')
if (not sys.platform.startswith('sun') and
not sys.platform == 'win32'):
cfg['extra_compile_args'].extend(["-fno-inline", "-O0", "-g"])
else:
# Define ECHO as nothing to prevent spurious newlines from
# printing within the libwcs parser
cfg['define_macros'].append(('NDEBUG', None))
cfg['undef_macros'].append('DEBUG')
if sys.platform == 'win32':
# These are written into wcsconfig.h, but that file is not
# used by all parts of wcslib.
cfg['define_macros'].extend([
('YY_NO_UNISTD_H', None),
('_CRT_SECURE_NO_WARNINGS', None),
('_NO_OLDNAMES', None), # for mingw32
('NO_OLDNAMES', None), # for mingw64
('__STDC__', None) # for MSVC
])
if sys.platform.startswith('linux'):
cfg['define_macros'].append(('HAVE_SINCOS', None))
# Squelch a few compilation warnings in WCSLIB
if setup_helpers.get_compiler_option() in ('unix', 'mingw32'):
if not get_distutils_build_option('debug'):
cfg['extra_compile_args'].extend([
'-Wno-strict-prototypes',
'-Wno-unused-function',
'-Wno-unused-value',
'-Wno-uninitialized'])
def get_extensions():
generate_c_docstrings()
######################################################################
# DISTUTILS SETUP
cfg = setup_helpers.DistutilsExtensionArgs()
wcslib_files = [ # List of wcslib files to compile
'flexed/wcsbth.c',
'flexed/wcspih.c',
'flexed/wcsulex.c',
'flexed/wcsutrn.c',
'cel.c',
'dis.c',
'lin.c',
'log.c',
'prj.c',
'spc.c',
'sph.c',
'spx.c',
'tab.c',
'wcs.c',
'wcserr.c',
'wcsfix.c',
'wcshdr.c',
'wcsprintf.c',
'wcsunits.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'),
join(WCSROOT, 'include', 'wcsconfig.h')
]
get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(join(WCSROOT, "include"))
astropy_wcs_files = [ # List of astropy.wcs files to compile
'distortion.c',
'distortion_wrap.c',
'docstrings.c',
'pipeline.c',
'pyutil.c',
'astropy_wcs.c',
'astropy_wcs_api.c',
'sip.c',
'sip_wrap.c',
'str_list_proxy.c',
'unit_list_proxy.c',
'util.c',
'wcslib_wrap.c',
'wcslib_tabprm_wrap.c']
cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension(str('astropy.wcs._wcs'), **cfg)]
def get_package_data():
# Installs the testing data files
api_files = [
'astropy_wcs.h',
'astropy_wcs_api.h',
'distortion.h',
'isnan.h',
'pipeline.h',
'pyutil.h',
'sip.h',
'util.h',
'wcsconfig.h',
]
api_files = [join('include', 'astropy_wcs', x) for x in api_files]
api_files.append(join('include', 'astropy_wcs_api.h'))
wcslib_headers = [
'cel.h',
'lin.h',
'prj.h',
'spc.h',
'spx.h',
'tab.h',
'wcs.h',
'wcserr.h',
'wcsmath.h',
'wcsprintf.h',
]
if not setup_helpers.use_system_library('wcslib'):
for header in wcslib_headers:
source = join('cextern', 'wcslib', 'C', header)
dest = join('astropy', 'wcs', 'include', 'wcslib', header)
if newer_group([source], dest, 'newer'):
shutil.copy(source, dest)
api_files.append(join('include', 'wcslib', header))
return {
str('astropy.wcs.tests'): ['data/*.hdr', 'data/*.fits',
'data/*.txt', 'data/*.fits.gz',
'maps/*.hdr', 'spectra/*.hdr',
'extension/*.c'],
str('astropy.wcs'): api_files,
}
def get_external_libraries():
return ['wcslib']
|
d5487ec59f3be8ab89f8f74fe746ba2ad01bfac28e45e4c36e911ccb9d2eda68 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from .. import units as u
from .wcs import WCS, WCSSUB_LONGITUDE, WCSSUB_LATITUDE
__doctest_skip__ = ['wcs_to_celestial_frame', 'celestial_frame_to_wcs']
__all__ = ['add_stokes_axis_to_wcs', 'celestial_frame_to_wcs',
'wcs_to_celestial_frame', 'proj_plane_pixel_scales',
'proj_plane_pixel_area', 'is_proj_plane_distorted',
'non_celestial_pixel_scales', 'skycoord_to_pixel',
'pixel_to_skycoord', 'custom_wcs_to_frame_mappings',
'custom_frame_to_wcs_mappings']
def add_stokes_axis_to_wcs(wcs, add_before_ind):
"""
Add a new Stokes axis that is uncorrelated with any other axes.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to add to
add_before_ind : int
Index of the WCS to insert the new Stokes axis in front of.
To add at the end, use ``add_before_ind = wcs.wcs.naxis``;
the beginning is at position 0.
Returns
-------
A new `~astropy.wcs.WCS` instance with an additional axis
"""
inds = [i + 1 for i in range(wcs.wcs.naxis)]
inds.insert(add_before_ind, 0)
newwcs = wcs.sub(inds)
newwcs.wcs.ctype[add_before_ind] = 'STOKES'
newwcs.wcs.cname[add_before_ind] = 'STOKES'
return newwcs
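# Usage sketch (assuming a 2-axis celestial WCS `w`):
#
#     w3 = add_stokes_axis_to_wcs(w, 2)  # append STOKES as the last axis
#     w3.wcs.naxis                       # -> 3
#     w3.wcs.ctype[2]                    # -> 'STOKES'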
def _wcs_to_celestial_frame_builtin(wcs):
# Import astropy.coordinates here to avoid circular imports
from ..coordinates import FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic
# Import astropy.time here otherwise setup.py fails before extensions are compiled
from ..time import Time
# Keep only the celestial part of the axes
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.wcs.lng == -1 or wcs.wcs.lat == -1:
return None
radesys = wcs.wcs.radesys
if np.isnan(wcs.wcs.equinox):
equinox = None
else:
equinox = wcs.wcs.equinox
xcoord = wcs.wcs.ctype[0][:4]
ycoord = wcs.wcs.ctype[1][:4]
# Apply logic from FITS standard to determine the default radesys
if radesys == '' and xcoord == 'RA--' and ycoord == 'DEC-':
if equinox is None:
radesys = "ICRS"
elif equinox < 1984.:
radesys = "FK4"
else:
radesys = "FK5"
if radesys == 'FK4':
if equinox is not None:
equinox = Time(equinox, format='byear')
frame = FK4(equinox=equinox)
elif radesys == 'FK4-NO-E':
if equinox is not None:
equinox = Time(equinox, format='byear')
frame = FK4NoETerms(equinox=equinox)
elif radesys == 'FK5':
if equinox is not None:
equinox = Time(equinox, format='jyear')
frame = FK5(equinox=equinox)
elif radesys == 'ICRS':
frame = ICRS()
else:
if xcoord == 'GLON' and ycoord == 'GLAT':
frame = Galactic()
elif xcoord == 'TLON' and ycoord == 'TLAT':
frame = ITRS(obstime=wcs.wcs.dateobs or None)
else:
frame = None
return frame
def _celestial_frame_to_wcs_builtin(frame, projection='TAN'):
# Import astropy.coordinates here to avoid circular imports
from ..coordinates import BaseRADecFrame, FK4, FK4NoETerms, FK5, ICRS, ITRS, Galactic
# Create a 2-dimensional WCS
wcs = WCS(naxis=2)
if isinstance(frame, BaseRADecFrame):
xcoord = 'RA--'
ycoord = 'DEC-'
if isinstance(frame, ICRS):
wcs.wcs.radesys = 'ICRS'
elif isinstance(frame, FK4NoETerms):
wcs.wcs.radesys = 'FK4-NO-E'
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK4):
wcs.wcs.radesys = 'FK4'
wcs.wcs.equinox = frame.equinox.byear
elif isinstance(frame, FK5):
wcs.wcs.radesys = 'FK5'
wcs.wcs.equinox = frame.equinox.jyear
else:
return None
elif isinstance(frame, Galactic):
xcoord = 'GLON'
ycoord = 'GLAT'
elif isinstance(frame, ITRS):
xcoord = 'TLON'
ycoord = 'TLAT'
wcs.wcs.radesys = 'ITRS'
wcs.wcs.dateobs = frame.obstime.utc.isot
else:
return None
wcs.wcs.ctype = [xcoord + '-' + projection, ycoord + '-' + projection]
return wcs
WCS_FRAME_MAPPINGS = [[_wcs_to_celestial_frame_builtin]]
FRAME_WCS_MAPPINGS = [[_celestial_frame_to_wcs_builtin]]
class custom_wcs_to_frame_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, '__call__'):
mappings = [mappings]
WCS_FRAME_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
WCS_FRAME_MAPPINGS.pop()
# Backward-compatibility
custom_frame_mappings = custom_wcs_to_frame_mappings
class custom_frame_to_wcs_mappings:
def __init__(self, mappings=[]):
if hasattr(mappings, '__call__'):
mappings = [mappings]
FRAME_WCS_MAPPINGS.append(mappings)
def __enter__(self):
pass
def __exit__(self, type, value, tb):
FRAME_WCS_MAPPINGS.pop()
def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
subclass instance that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError("Could not determine celestial frame corresponding to "
"the specified WCS object")
def celestial_frame_to_wcs(frame, projection='TAN'):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame`
subclass instance for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError("Could not determine WCS corresponding to the specified "
"coordinate frame.")
def proj_plane_pixel_scales(wcs):
"""
For a WCS returns pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the scales corresponding to celestial axes only,
make sure that the input `~astropy.wcs.WCS` object contains
celestial axes only, e.g., by passing in the
`~astropy.wcs.WCS.celestial` WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
scale : `~numpy.ndarray`
A vector (`~numpy.ndarray`) of projection plane increments
corresponding to each pixel side (axis). The units of the returned
results are the same as the units of `~astropy.wcs.Wcsprm.cdelt`,
`~astropy.wcs.Wcsprm.crval`, and `~astropy.wcs.Wcsprm.cd` for
the celestial WCS and can be obtained by inquiring the value
of `~astropy.wcs.Wcsprm.cunit` property of the input
`~astropy.wcs.WCS` WCS object.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
"""
return np.sqrt((wcs.pixel_scale_matrix**2).sum(axis=0, dtype=float))
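# Worked sketch (hypothetical values): for a distortion-free WCS with
# cdelt = [-0.0003, 0.0003] deg and no rotation, the pixel scale matrix
# is diagonal and this returns approximately array([0.0003, 0.0003]),
# i.e. about 1.08 arcsec per pixel on each axis.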
def proj_plane_pixel_area(wcs):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`) returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This function is concerned **only** about the transformation
"image plane"->"projection plane" and **not** about the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this function ignores distortions arising due to
non-linear nature of most projections.
.. note::
In order to compute the area of pixels corresponding to celestial
axes only, this function uses the `~astropy.wcs.WCS.celestial` WCS
object of the input ``wcs``. This is different from the
`~astropy.wcs.utils.proj_plane_pixel_scales` function
that computes the scales for the axes of the input WCS itself.
Parameters
----------
wcs : `~astropy.wcs.WCS`
A world coordinate system object.
Returns
-------
area : float
Area (in the projection plane) of the pixel at ``CRPIX`` location.
The units of the returned result are the same as the units of
the `~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`,
and `~astropy.wcs.Wcsprm.cd` for the celestial WCS and can be
obtained by inquiring the value of `~astropy.wcs.Wcsprm.cunit`
property of the `~astropy.wcs.WCS.celestial` WCS object.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
"""
psm = wcs.celestial.pixel_scale_matrix
if psm.shape != (2, 2):
raise ValueError("Pixel area is defined only for 2D pixels.")
return np.abs(np.linalg.det(psm))
def is_proj_plane_distorted(wcs, maxerr=1.0e-5):
r"""
For a WCS returns `False` if square image (detector) pixels stay square
when projected onto the "plane of intermediate world coordinates"
as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <http://adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
It will return `True` if the transformation from image (detector)
coordinates to focal plane coordinates is non-orthogonal or if the
WCS contains non-linear (e.g., SIP) distortions.
.. note::
Since this function is concerned **only** about the transformation
"image plane"->"focal plane" and **not** about the transformation
"celestial sphere"->"focal plane"->"image plane",
this function ignores distortions arising due to non-linear nature
of most projections.
Let's denote by *C* either the original or the reconstructed
(from ``PC`` and ``CDELT``) CD matrix. `is_proj_plane_distorted`
verifies that the transformation from image (detector) coordinates
to the focal plane coordinates is orthogonal using the following
check:
.. math::
\left \| \frac{C \cdot C^{\mathrm{T}}}
{| det(C)|} - I \right \|_{\mathrm{max}} < \epsilon .
Parameters
----------
wcs : `~astropy.wcs.WCS`
World coordinate system object
maxerr : float, optional
Accuracy to which the CD matrix, **normalized** such
that :math:`|det(CD)|=1`, should be close to being an
orthogonal matrix as described in the above equation
(see :math:`\epsilon`).
Returns
-------
distorted : bool
Returns `True` if focal (projection) plane is distorted and `False`
otherwise.
"""
cwcs = wcs.celestial
return (not _is_cd_orthogonal(cwcs.pixel_scale_matrix, maxerr) or
_has_distortion(cwcs))
def _is_cd_orthogonal(cd, maxerr):
shape = cd.shape
if not (len(shape) == 2 and shape[0] == shape[1]):
raise ValueError("CD (or PC) matrix must be a 2D square matrix.")
pixarea = np.abs(np.linalg.det(cd))
if (pixarea == 0.0):
raise ValueError("CD (or PC) matrix is singular.")
# NOTE: Technically, below we should use np.dot(cd, np.conjugate(cd.T))
# However, I am not aware of complex CD/PC matrices...
I = np.dot(cd, cd.T) / pixarea
cd_unitary_err = np.amax(np.abs(I - np.eye(shape[0])))
return (cd_unitary_err < maxerr)
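# Worked sketch: a pure rotation-plus-scale CD matrix such as
# [[0, -s], [s, 0]] has |det| = s**2 and C @ C.T / |det| = I, so
# cd_unitary_err is 0 and the matrix passes as orthogonal; adding skew
# makes the product deviate from the identity and the check fails.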
def non_celestial_pixel_scales(inwcs):
"""
Calculate the pixel scale along each axis of a non-celestial WCS,
for example one with mixed spectral and spatial axes.
Parameters
----------
inwcs : `~astropy.wcs.WCS`
The world coordinate system object.
Returns
-------
scale : `numpy.ndarray`
The pixel scale along each axis.
"""
if inwcs.is_celestial:
raise ValueError("WCS is celestial, use celestial_pixel_scales instead")
pccd = inwcs.pixel_scale_matrix
if np.allclose(np.extract(1-np.eye(*pccd.shape), pccd), 0):
return np.abs(np.diagonal(pccd))*u.deg
else:
raise ValueError("WCS is rotated, cannot determine consistent pixel scales")
def _has_distortion(wcs):
"""
`True` if the WCS contains any SIP or image distortion components.
"""
return any(getattr(wcs, dist_attr) is not None
for dist_attr in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip'])
# TODO: in future, we should think about how the following two functions can be
# integrated better into the WCS class.
def skycoord_to_pixel(coords, wcs, origin=0, mode='all'):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == 'all':
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == 'wcs':
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
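# Usage sketch (assuming a celestial WCS `w` and a SkyCoord `coord`):
#
#     xp, yp = skycoord_to_pixel(coord, w, origin=0, mode='all')
#
# xp and yp are then 0-based pixel coordinates including any distortions.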
def pixel_to_skycoord(xp, yp, wcs, origin=0, mode='all', cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : Whatever ``cls`` is (a subclass of `~astropy.coordinates.SkyCoord`)
The celestial coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from ..coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == 'all':
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == 'wcs':
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
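# Usage sketch (assuming a celestial WCS `w`):
#
#     coord = pixel_to_skycoord(250.0, 300.0, w)  # SkyCoord in w's frame
#
# Passing ``cls=MySkyCoord`` (a hypothetical SkyCoord subclass) returns
# an instance of that subclass instead.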
|
b9586ff03d59f635a27d6a322375e64e8b1900cb3b81d4d3f1b2a4b7c936fbdb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Under the hood, there are 3 separate classes that perform different
parts of the transformation:
- `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
functionality in `wcslib`_. (This includes TPV and TPD
polynomial distortion, but not SIP distortion).
- `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
`SIP`_ convention.
- `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
lookup tables.
Additionally, the class `WCS` aggregates all of these transformations
together in a pipeline:
- Detector to image plane correction (by a pair of
`~astropy.wcs.DistortionLookupTable` objects).
- `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
object)
- `distortion paper`_ table-lookup correction (by a pair of
`~astropy.wcs.DistortionLookupTable` objects).
- `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
"""
# STDLIB
import copy
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
import numpy as np
# LOCAL
from .. import log
from ..io import fits
from . import _docutil as __
try:
from . import _wcs
except ImportError:
if not _ASTROPY_SETUP_:
raise
else:
_wcs = None
from ..utils.compat import possible_filename
from ..utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm',
'WCSBase', 'validate', 'WcsError', 'SingularMatrixError',
'InconsistentAxisTypesError', 'InvalidTransformError',
'InvalidCoordinateError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
if _wcs is not None:
_parsed_version = _wcs.__version__.split('.')
if int(_parsed_version[0]) == 5 and int(_parsed_version[1]) < 8:
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used.")
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Tabprm = _wcs.Tabprm
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB', 'WCSHDR', 'WCSHDO')):
locals()[key] = val
__all__.append(key)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore, then a
# number in the range 0-19, followed by an underscore and another number in
# the range 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
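# Sketch of what the pattern accepts: SIP_KW.match('A_0_2') and
# SIP_KW.match('BP_12_3A') succeed, while SIP_KW.match('C_0_2') and
# SIP_KW.match('A_25_0') do not.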
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
Parameters
----------
header : astropy.io.fits header object, Primary HDU, Image HDU, string, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : An astropy.io.fits file (hdulist) object, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of flags, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
        header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
       syntactically valid; otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
"""
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
            if isinstance(header_string, str):
                header_bytes = header_string.encode('ascii')
            else:
                header_bytes = header_string
                header_string = header_string.decode('ascii')
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False)
except _wcs.NoWcsKeywordsFoundError:
est_naxis = 0
else:
if naxis is not None:
try:
tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis
else:
est_naxis = 2
header = fits.Header.fromstring(header_string)
if est_naxis == 0:
est_naxis = 2
self.naxis = est_naxis
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
            if isinstance(header_string, str):
                header_bytes = header_string.encode('ascii')
            else:
                header_bytes = header_string
                header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {0} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({0:d}) than the "
"image it is associated with ({1:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
copy.wcs = self.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
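            # j = 3, 11, 23, 39 are the coefficients of the radial
            # terms r, r**3, r**5, r**7 in the PV representation
            # (see the reference above).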
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array[naxis], optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
            Used to get ``NAXIS1`` and ``NAXIS2``.
            ``header`` and ``axes`` are mutually exclusive, alternative
            ways to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
        axes : length-2 sequence of ints, optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1 = self._naxis1
naxis2 = self._naxis2
except AttributeError:
warnings.warn("Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header[str('AXISCORR')]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
                    assert isinstance(fobj, fits.HDUList), (
                        'An astropy.io.fits.HDUList is required for '
                        'Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + str('.EXTVER')
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + str('.AXIS.{0:d}').format(i)
if i == header[dp_axis_key]:
d_data = fobj[str('D2IMARR'), d_extver].data
else:
d_data = (fobj[str('D2IMARR'), d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj[str('D2IMARR'), d_extver].header
d_crpix = (d_header.get(str('CRPIX1'), 0.0), d_header.get(str('CRPIX2'), 0.0))
d_crval = (d_header.get(str('CRVAL1'), 0.0), d_header.get(str('CRVAL2'), 0.0))
d_cdelt = (d_header.get(str('CDELT1'), 1.0), d_header.get(str('CDELT2'), 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in list(header):
if key.startswith(dp + str('.')):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn("The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[(str('D2IMARR'), 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[(str('D2IMARR'), 1)].header
naxis = d2im_hdr[str('NAXIS')]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get(str('CRPIX') + str(i), 0.0)
crval[i - 1] = d2im_hdr.get(str('CRVAL') + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get(str('CDELT') + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
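        # For axis 1, for example, write_d2i below produces record-valued
        # header cards of the form:
        #   D2IMDIS1 = 'LOOKUP'
        #   D2IM1.EXTVER = 1
        #   D2IM1.NAXES = 2   (for a 2-D lookup table)
        #   D2IM1.AXIS.1 = 1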
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[str('{0}{1:d}').format(dist, num)] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[str('{0}{1:d}.NAXES').format(d_kw, num)] = (
len(det2im.data.shape), 'Number of independent variables in d2im function')
for i in range(det2im.data.ndim):
hdulist[0].header[str('{0}{1:d}.AXIS.{2:d}').format(d_kw, num, i + 1)] = (
i + 1, 'Axis number of the jth independent variable in a d2im function')
image = fits.ImageHDU(det2im.data, name=str('D2IMARR'))
header = image.header
header[str('CRPIX1')] = (det2im.crpix[0],
'Coordinate system reference pixel')
header[str('CRPIX2')] = (det2im.crpix[1],
'Coordinate system reference pixel')
header[str('CRVAL1')] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header[str('CRVAL2')] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header[str('CDELT1')] = (det2im.cdelt[0],
'Coordinate increment along axis')
header[str('CDELT2')] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = str('DP')
err_kw = str('CPERR')
else:
d_kw = str('DQ')
err_kw = str('CQERR')
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + str('.EXTVER')
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + str('.AXIS.{0:d}'.format(i))
if i == header[dp_axis_key]:
d_data = fobj[str('WCSDVARR'), d_extver].data
else:
d_data = (fobj[str('WCSDVARR'), d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj[str('WCSDVARR'), d_extver].header
d_crpix = (d_header.get(str('CRPIX1'), 0.0),
d_header.get(str('CRPIX2'), 0.0))
d_crval = (d_header.get(str('CRVAL1'), 0.0),
d_header.get(str('CRVAL2'), 0.0))
d_cdelt = (d_header.get(str('CDELT1'), 1.0),
d_header.get(str('CDELT2'), 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in list(header):
if key.startswith(dp + str('.')):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = str('DP')
err_kw = str('CPERR')
else:
d_kw = str('DQ')
err_kw = str('CQERR')
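        # For CPDIS axis 1, for example, write_dist below produces:
        #   CPDIS1 = 'LOOKUP'
        #   DP1.EXTVER = 1
        #   DP1.NAXES = 2   (for a 2-D lookup table)
        #   DP1.AXIS.1 = 1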
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[str('{0}{1:d}').format(dist, num)] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[str('{0}{1:d}.NAXES').format(d_kw, num)] = (
len(cpdis.data.shape), 'Number of independent variables in distortion function')
for i in range(cpdis.data.ndim):
hdulist[0].header[str('{0}{1:d}.AXIS.{2:d}').format(d_kw, num, i + 1)] = (
i + 1,
'Axis number of the jth independent variable in a distortion function')
image = fits.ImageHDU(cpdis.data, name=str('WCSDVARR'))
header = image.header
header[str('CRPIX1')] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header[str('CRPIX2')] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header[str('CRVAL1')] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header[str('CRVAL2')] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header[str('CDELT1')] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header[str('CDELT2')] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in (m.group() for m in map(SIP_KW.match, list(header))
if m is not None):
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if str("A_ORDER") in header and header[str('A_ORDER')] > 1:
if str("B_ORDER") not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header[str("A_ORDER")])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("A_{0}_{1}").format(i, j)
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header[str("B_ORDER")])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("B_{0}_{1}").format(i, j)
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header[str('A_ORDER')]
del header[str('B_ORDER')]
ctype = [header['CTYPE{0}{1}'.format(nax, wcskey)] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif str("B_ORDER") in header and header[str('B_ORDER')] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if str("AP_ORDER") in header and header[str('AP_ORDER')] > 1:
if str("BP_ORDER") not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header[str("AP_ORDER")])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("AP_{0}_{1}").format(i, j)
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header[str("BP_ORDER")])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("BP_{0}_{1}").format(i, j)
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header[str('AP_ORDER')]
del header[str('BP_ORDER')]
elif str("BP_ORDER") in header and header[str('BP_ORDER')] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if str("CRPIX1{0}".format(wcskey)) not in header or str("CRPIX2{0}".format(wcskey)) not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get("CRPIX1{0}".format(wcskey))
crpix2 = header.get("CRPIX2{0}".format(wcskey))
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
keywords[str('{0}_ORDER').format(name)] = size - 1
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[
str('{0}_{1:d}_{2:d}').format(name, i, j)] = a[i, j]
write_array(str('A'), self.sip.a)
write_array(str('B'), self.sip.b)
write_array(str('AP'), self.sip.ap)
write_array(str('BP'), self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
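        # Rearrange an (N, 2) array of (ra, dec) pairs into the axis
        # order of this WCS: ra goes into column self.wcs.lng and dec
        # into column self.wcs.lat (any remaining axes are zero-filled).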
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == 'output':
output = self._normalize_sky(output)
return (output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape))
return [output[:, i].reshape(axes[0].shape)
for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {0})".format(self.naxis))
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{0}], origin)".format(self.naxis))
if self.naxis == 1 and len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('naxis', 8),
__.RA_DEC_ORDER(8),
__.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
""".format(__.TWO_OR_MORE_ARGS('naxis', 8),
__.RA_DEC_ORDER(8),
__.RETURNS('world coordinates, in degrees', 8))
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
            # (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
            # a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
            # (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
            # `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
            # While the proposed code is relatively long (considering
            # the simplicity of the algorithm), this is due to: 1)
            # checking whether an iterative solution is necessary at all;
            # 2) checking for divergence; 3) re-implementation of the
            # completely vectorized algorithm as an "adaptive" vectorized
            # algorithm (for cases when some points diverge, for which we
            # want to stop iterations). In my tests, the adaptive version
            # of the algorithm is about 50% slower than the non-adaptive
            # version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
            # Here, the input parameter `world` can be an `NxM` array
            # where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
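            #
            # (The actual implementation below compares *squared* L2 norms
            # of the corrections against tolerance**2, which avoids taking
            # a square root at every iteration.)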
#
#
# ### IMPORTANT NOTE: ###
#
# If, in the future releases of the `~astropy.wcs`,
# `pix2foc` will not apply all the required distortion
# corrections then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if self.sip is None and \
self.cpdis1 is None and self.cpdis2 is None and \
self.det2im1 is None and self.det2im2 is None:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
                # Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {0:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
        all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
        adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (Default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
            within this many pixels of the current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (Default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (Default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (Default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
                but there are only a few input data points left for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
                shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
                for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (Default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
            the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
            performance penalty). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
        Using the method of fixed-point iterations, we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(__.TWO_OR_MORE_ARGS('naxis', 8),
__.RA_DEC_ORDER(8),
__.RETURNS('pixel coordinates', 8))
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('naxis', 8),
__.RA_DEC_ORDER(8),
__.RETURNS('pixel coordinates', 8))
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('pixel coordinates', 8))
def to_fits(self, relax=False, key=None):
"""
Generate an `astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `astropy.io.fits.HDUList`
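Examples
--------
A minimal sketch (assuming ``w`` is an existing `~astropy.wcs.WCS`
instance; the output file name is hypothetical)::

    hdulist = w.to_fits(relax=True)
    hdulist.writeto('wcs_only.fits')   # hypothetical output path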
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
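Examples
--------
A minimal sketch (assuming ``w`` is an existing `~astropy.wcs.WCS`
instance)::

    hdr = w.to_header(relax=True)
    # hdr is an `astropy.io.fits.Header` containing only WCS keywords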
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {0} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if the current WCS is already distortion-corrected (e.g.,
drizzled), the SIP distortion components should not apply, and the SIP
coefficients should be removed from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case where the alternate key is " "
kw = 'CTYPE{0}{1}'.format(i, self.wcs.alt).strip()
if kw in header:
    val = header[kw]
    # Handle the suffix explicitly: str.strip("-SIP") would remove
    # any of the characters '-', 'S', 'I', 'P' from both ends of the
    # value, not just the "-SIP" suffix.
    if val.endswith("-SIP"):
        val = val[:-4]
    if add_sip:
        val = val + "-SIP"
    header[kw] = val
else:
    continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = ('# Region file format: DS9 version 4.0 \n'
'# global color=green font="helvetica 12 bold '
'select=1 highlite=1 edit=1 move=1 delete=1 '
'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write('{}\n'.format(coordsys))
f.write('polygon(')
self.calc_footprint().tofile(f, sep=',')
f.write(') # color={0}, width={1:d} \n'.format(color, width))
@property
def _naxis1(self):
return self._naxis[0]
@_naxis1.setter
def _naxis1(self, value):
self._naxis[0] = value
@property
def _naxis2(self):
return self._naxis[1]
@_naxis2.setter
def _naxis2(self, value):
self._naxis[1] = value
def _get_naxis(self, header=None):
_naxis = []
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header['NAXIS{}'.format(naxis)])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
'''
Return a short description, mirroring the behavior of the
`printwcs()` method.
'''
description = ["WCS Keywords\n",
"Number of WCS axes: {0!r}".format(self.naxis)]
sfmt = ' : ' + "".join(["{"+"{0}".format(i)+"!r} " for i in range(self.naxis)])
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append('NAXIS : {}'.format(' '.join(map(str, self._naxis))))
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dicts
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
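# A worked example of the digit decoding below: an axis_type value of
# 2200 decodes as coordinate_type (2200 // 1000) % 10 == 2
# ('celestial'), scale (2200 // 100) % 10 == 2 ('non-linear
# celestial'), group 0, and number 0 (a longitude coordinate).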
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
return (__WCS_unpickle__,
(self.__class__, self.__dict__, buffer.getvalue(),))
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with one axis fewer
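Examples
--------
A minimal sketch (assuming ``w`` is a 3-axis WCS with
``ctype == ['RA---TAN', 'DEC--TAN', 'VLSR']``)::

    w2 = w.dropaxis(2)   # drop the spectral axis, keeping RA/DEC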
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with the same number of axes, but two
swapped
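Examples
--------
A minimal sketch (continuing the 3-axis example above)::

    w2 = w.swapaxes(0, 2)   # spectral axis first, RA last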
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES])
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` method, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS instance
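Examples
--------
A minimal sketch (assuming ``w`` is a 2-D celestial WCS matching an
image array ``data`` that is sliced the same way)::

    w_cut = w.slice((slice(100, 200), slice(50, 150)))
    # equivalent shortcut via __getitem__:
    w_cut = w[100:200, 50:150]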
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
raise ValueError("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
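# For example, with start=0 and step=2 this reduces to
# crp = (crpix - 1.)/2. + 0.75, i.e. the original reference
# position expressed in coordinates of the binned pixel grid.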
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{0} attribute is not updated because at "
"least one indix ('{1}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError("'{0}' object is not iterable".format(self.__class__.__name__))
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
A list of names along each axis
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL])
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.celestial.naxis == 2
except InconsistentAxisTypesError:
return False
@property
def pixel_scale_matrix(self):
try:
cdelt = np.matrix(np.diag(self.wcs.get_cdelt()))
pc = np.matrix(self.wcs.get_pc())
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
cdelt = np.matrix(self.wcs.cd) * np.matrix(np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.matrix(np.diag(self.wcs.cdelt))
try:
pc = np.matrix(self.wcs.pc)
except AttributeError:
pc = 1
pccd = np.array(cdelt * pc)
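# For example (a hypothetical 2-D celestial WCS): with an identity
# PC matrix and cdelt = [-0.1, 0.1] deg/pix, pccd is simply
# diag(-0.1, 0.1).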
return pccd
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes.
With this method, one can do:
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from ..visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
self.__dict__.update(dct)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
WCS.__init__(self, hdulist[0].header, hdulist)
return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `astropy.io.fits.Header` object
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
keysel : sequence of flags, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS` objects
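Examples
--------
A minimal sketch (assuming ``header`` is an `astropy.io.fits.Header`
that may contain several alternate WCS transforms)::

    wcses = find_all_wcs(header, relax=True)
    for w in wcses:
        print(w.wcs.alt, w.wcs.ctype)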
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str path, readable file-like object or `astropy.io.fits.HDUList` object
The FITS file to validate.
Returns
-------
results : WcsValidateResults instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
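Examples
--------
A minimal sketch (the file name is hypothetical)::

    from astropy.wcs import validate
    results = validate('image.fits')   # hypothetical FITS file
    print(results)                     # pretty-printed report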
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [" WCS key '{0}':".format(self._key or ' ')]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = ' ({0})'.format(self._hdu_name)
else:
hdu_name = ''
result = ['HDU {0}{1}:'.format(self._hdu_index, hdu_name)]
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
|
bb8f14f22dcf05c37370dd69e0d57f7685c80576a31eb8eed57097313b64c731 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import operator
from datetime import datetime, date, timedelta
from time import strftime, strptime
import numpy as np
from .. import units as u, constants as const
from .. import _erfa as erfa
from ..units import UnitConversionError
from ..utils import ShapedLikeNDArray
from ..utils.compat.misc import override__dir__
from ..utils.data_info import MixinInfo, data_info_factory
from .utils import day_frac
from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS,
TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime)
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # pylint: disable=W0611
__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES',
'ScaleValueError', 'OperandTypeError', 'TimeInfo']
STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
LOCAL_SCALES = ('local',)
TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales)
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'),
('tai', 'tcg'): ('tt',),
('tai', 'ut1'): ('utc',),
('tai', 'tdb'): ('tt',),
('tcb', 'tcg'): ('tdb', 'tt'),
('tcb', 'tt'): ('tdb',),
('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'),
('tcb', 'utc'): ('tdb', 'tt', 'tai'),
('tcg', 'tdb'): ('tt',),
('tcg', 'ut1'): ('tt', 'tai', 'utc'),
('tcg', 'utc'): ('tt', 'tai'),
('tdb', 'ut1'): ('tt', 'tai', 'utc'),
('tdb', 'utc'): ('tt', 'tai'),
('tt', 'ut1'): ('tai', 'utc'),
('tt', 'utc'): ('tai',),
}
GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg')
BARYCENTRIC_SCALES = ('tcb', 'tdb')
ROTATIONAL_SCALES = ('ut1',)
TIME_DELTA_TYPES = dict((scale, scales)
for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES,
ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales)
TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {('tt', 'tai'): None,
('tai', 'tt'): None,
('tcg', 'tt'): -erfa.ELG,
('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcg', 'tai'): -erfa.ELG,
('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcb', 'tdb'): -erfa.ELB,
('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)}
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
'mean': {
'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')},
'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')},
'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}},
'apparent': {
'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')},
'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')},
'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)},
'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}}
class TimeInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # unit is read-only and None
attr_names = MixinInfo.attr_names | {'serialize_method'}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = ('format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location',
'_delta_ut1_utc', '_delta_tdb_tt')
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = 'value'
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == 'formatted_value':
out = ('value',)
elif method == 'jd1_jd2':
out = ('jd1', 'jd2')
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {'fits': 'jd1_jd2',
'ecsv': 'formatted_value',
'hdf5': 'jd1_jd2',
'yaml': 'jd1_jd2',
None: 'jd1_jd2'}
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats]))
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict_base(self, map):
if 'jd1' in map and 'jd2' in map:
format = map.pop('format')
map['format'] = 'jd'
map['val'] = map.pop('jd1')
map['val2'] = map.pop('jd2')
else:
format = map['format']
map['val'] = map.pop('value')
out = self._parent_cls(**map)
out.format = format
return out
def _construct_from_dict(self, map):
delta_ut1_utc = map.pop('_delta_ut1_utc', None)
delta_tdb_tt = map.pop('_delta_tdb_tt', None)
out = self._construct_from_dict_base(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError('input columns have inconsistent locations')
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
jd2000 = 2451544.5  # Arbitrary JD near J2000.0 (2000-01-01T00:00) that will work with ERFA
jd1 = np.full(shape, jd2000, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
tm_attrs = {attr: getattr(col0, attr)
for attr in ('scale', 'location',
'precision', 'in_subfmt', 'out_subfmt')}
out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeDeltaInfo(TimeInfo):
_represent_as_dict_extra_attrs = ('format', 'scale')
def _construct_from_dict(self, map):
return self._construct_from_dict_base(map)
class Time(ShapedLikeNDArray):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str',
'jyear_str']
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Subformat for inputting string times
out_subfmt : str, optional
Subformat for outputting string times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize
an EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
copy : bool, optional
Make a copy of the input values
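Examples
--------
A short example of the basic usage (creating a time and converting
it between formats and scales)::

>>> t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
>>> t.jd
2455197.5
>>> t.tt
<Time object: scale='tt' format='iso' value=2010-01-01 00:01:06.184>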
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __new__(cls, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if isinstance(val, cls):
self = val.replicate(format=format, copy=copy)
else:
self = super().__new__(cls)
return self
def __getnewargs__(self):
return (self._time,)
def __init__(self, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if location is not None:
from ..coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
self.location = None
if isinstance(val, self.__class__):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(val, val2, format, scale, copy,
precision, in_subfmt, out_subfmt)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (self.location.size > 1 and
self.location.shape != self.shape):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape,
subok=True)
except Exception:
raise ValueError('The location with shape {0} cannot be '
'broadcast against time with shape {1}. '
'Typically, either give a single location or '
'one for each time.'
.format(self.location.shape, self.shape))
def _init_from_vals(self, val, val2, format, scale, copy,
precision=None, in_subfmt=None, out_subfmt=None):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = '*'
if out_subfmt is None:
out_subfmt = '*'
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError('Input val and val2 have inconsistent shape; '
'they cannot be broadcast together.')
if scale is not None:
if not (isinstance(scale, str) and
scale.lower() in self.SCALES):
raise ScaleValueError("Scale {0!r} is not in the allowed scales "
"{1}".format(scale,
sorted(self.SCALES)))
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(val, val2, format, scale,
precision, in_subfmt, out_subfmt)
self._format = self._time.name
# If any inputs were masked then mask jd2 accordingly. From the above
# routine ``mask`` must be either Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale,
precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and val.dtype.kind in ('S', 'U', 'O'):
formats = [(name, cls) for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)]
err_msg = ('any of the formats where the format keyword is '
'optional {0}'.format([name for name, cls in formats]))
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(('astropy_time', TimeAstropyTime))
elif not (isinstance(format, str) and
format.lower() in self.FORMATS):
if format is None:
raise ValueError("No time format was given, and the input is "
"not unique")
else:
raise ValueError("Format {0!r} is not one of the allowed "
"formats {1}".format(format,
sorted(self.FORMATS)))
else:
formats = [(format, self.FORMATS[format])]
err_msg = 'the format class {0}'.format(format)
for format, FormatClass in formats:
try:
return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError):
pass
else:
raise ValueError('Input values did not match {0}'.format(err_msg))
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format='datetime', scale='utc')
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : string, sequence, ndarray
Objects containing time data of type string
format_string : string
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ('U', 'S'):
err = "Expected type is string, a bytes-like object or a sequence"\
" of these. Got dtype '{}'".format(time_array.dtype.kind)
raise TypeError(err)
to_string = (str if time_array.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([time_array, None],
op_dtypes=[time_array.dtype, 'U30'])
for time, formatted in iterator:
time_tuple = strptime(to_string(time), format_string)
formatted[...] = '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}'.format(*time_tuple)
format = kwargs.pop('format', None)
out = cls(*iterator.operands[1:], format='isot', **kwargs)
if format is not None:
out.format = format
return out
@property
def writeable(self):
return self._time.jd1.flags.writeable and self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str',
'jyear_str']
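For example, reinterpreting the same instant in a different
format (a minimal sketch)::

>>> t = Time(2451544.5, format='jd')
>>> t.format = 'iso'
>>> t.value
'2000-01-01 00:00:00.000'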
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(self.FORMATS)))
format_cls = self.FORMATS[format]
# If current output subformat is not in the new format then replace
# with default '*'
if hasattr(format_cls, 'subfmts'):
subfmt_names = [subfmt[0] for subfmt in format_cls.subfmts]
if self.out_subfmt not in subfmt_names:
self.out_subfmt = '*'
self._time = format_cls(self._time.jd1, self._time.jd2,
self._time._scale, self.precision,
in_subfmt=self.in_subfmt,
out_subfmt=self.out_subfmt,
from_jd=True)
self._format = format
def __repr__(self):
return ("<{0} object: scale='{1}' format='{2}' value={3}>"
.format(self.__class__.__name__, self.scale, self.format,
getattr(self, self.format)))
def __str__(self):
return str(getattr(self, self.format))
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : string
Format definition of return string.
Returns
-------
formatted : string, numpy.array
String or numpy.array of strings formatted according to the given
format string.
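Examples
--------
A minimal sketch (the month abbreviation assumes an English
locale)::

    t = Time('2000-01-01 00:00:00', scale='utc')
    t.strftime('%d %b %Y')   # -> '01 Jan 2000'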
"""
formatted_strings = []
for sk in self.replicate('iso')._time.str_kwargs():
date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple()
datetime_tuple = (sk['year'], sk['mon'], sk['day'],
sk['hour'], sk['min'], sk['sec'],
date_tuple[6], date_tuple[7], -1)
formatted_strings.append(strftime(format_spec, datetime_tuple))
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
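# For example, transforming 'tcb' -> 'ut1': xform_sort == ('tcb', 'ut1'),
# multi == ('tdb', 'tt', 'tai', 'utc') from MULTI_HOPS, so
# xforms == ('tcb', 'tdb', 'tt', 'tai', 'utc', 'ut1').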
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = '_get_delta_{0}_{1}'.format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision,
self.in_subfmt, self.out_subfmt,
from_jd=True)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError('precision attribute must be an int between '
'0 and 9')
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('in_subfmt attribute must be a string')
self._time.in_subfmt = val
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('out_subfmt attribute must be a string')
self._time.out_subfmt = val
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in ((self._time, 'jd1'),
(self._time, 'jd2'),
(self, '_delta_ut1_utc'),
(self, '_delta_tdb_tt'),
(self, 'location')):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
out = value
if not self._time.jd1.shape and not np.ma.is_masked(value):
out = value.item()
return out
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
@property
def value(self):
"""Time value(s) in current format"""
# The underlying way to get the time values for the current format is:
# self._shaped_like_input(self._time.to_value(parent=self))
# This is done in __getattr__. By calling getattr(self, self.format)
# the ``value`` attribute is cached.
return getattr(self, self.format)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif ((self_location is None and value.location is not None) or
(self_location is not None and value.location is None)):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError('cannot set to Time with different location: '
'expected location={} and '
'got location={}'
.format(self_location, value.location))
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format,
location=self_location)
except Exception as err:
raise ValueError('cannot convert value to a compatible Time object: {}'
.format(err))
return value
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError('{} object is read-only. Make a '
'copy() or set "writeable" attribute to True.'
.format(self.__class__.__name__))
else:
raise ValueError('scalar {} object is read-only.'
.format(self.__class__.__name__))
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or heliocentre and Earth,
            in TDB seconds. Should be added to the original time to get the
            time in the solar system barycentre or the heliocentre.
            Also, the time conversion to BJD will then include the
            relativistic correction as well.
"""
if kind.lower() not in ('barycentric', 'heliocentric'):
raise ValueError("'kind' parameter must be one of 'heliocentric' "
"or 'barycentric'")
if location is None:
if self.location is None:
raise ValueError('An EarthLocation needs to be set or passed '
'in to calculate bary- or heliocentric '
'corrections')
location = self.location
from ..coordinates import (UnitSphericalRepresentation, CartesianRepresentation,
HCRS, ICRS, GCRS, solar_system_ephemeris)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError("Supplied location does not have a valid `get_itrs` method")
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == 'heliocentric':
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation).
represent_as(CartesianRepresentation).xyz)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale='tdb')
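    # Hedged usage sketch for light_travel_time; the location and target
    # below are hypothetical examples, not values from this module:
    #
    #     from astropy.coordinates import SkyCoord, EarthLocation
    #     loc = EarthLocation.from_geodetic(0., 51.48, 0.)
    #     target = SkyCoord(279.23, 38.78, unit='deg')
    #     t = Time('2018-01-01T00:00:00', location=loc)
    #     t_bary = t.tdb + t.light_travel_time(target, kind='barycentric')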
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
        ----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `str`, or `None`; optional
The longitude on the Earth at which to compute the sidereal time.
Can be given as a `~astropy.units.Quantity` with angular units
(or an `~astropy.coordinates.Angle` or
`~astropy.coordinates.Longitude`), or as a name of an
observatory (currently, only ``'greenwich'`` is supported,
equivalent to 0 deg). If `None` (default), the ``lon`` attribute of
the Time object is used.
model : str or `None`; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
sidereal time : `~astropy.coordinates.Longitude`
Sidereal time as a quantity with units of hourangle
""" # docstring is formatted below
from ..coordinates import Longitude
if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
raise ValueError('The kind of sidereal time has to be {0}'.format(
' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models.keys())[-1]
else:
if model.upper() not in available_models:
raise ValueError(
'Model {0} not implemented for {1} sidereal time; '
'available models are {2}'
.format(model, kind, sorted(available_models.keys())))
if longitude is None:
if self.location is None:
raise ValueError('No longitude is given but the location for '
'the Time object is not set.')
longitude = self.location.lon
elif longitude == 'greenwich':
longitude = Longitude(0., u.degree,
wrap_angle=180.*u.degree)
else:
# sanity check on input
longitude = Longitude(longitude, u.degree,
wrap_angle=180.*u.degree)
gst = self._erfa_sidereal_time(available_models[model.upper()])
return Longitude(gst + longitude, u.hourangle)
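    # Illustrative call (hedged; which precession/nutation models exist
    # depends on SIDEREAL_TIME_MODELS, formatted into the docstring below):
    #
    #     t = Time('2010-01-01T00:00:00')
    #     gst = t.sidereal_time('apparent', longitude='greenwich')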
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
def _erfa_sidereal_time(self, model):
"""Calculate a sidereal time using a IAU precession/nutation model."""
from ..coordinates import Longitude
erfa_function = model['function']
erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
for scale in model['scales']
for jd_part in ('jd1', 'jd2_filled')]
sidereal_time = erfa_function(*erfa_parameters)
if self.masked:
sidereal_time[self.mask] = np.nan
return Longitude(sidereal_time, u.radian).to(u.hourangle)
def copy(self, format=None):
"""
        Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply('copy', format=format)
def replicate(self, format=None, copy=False):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply('copy' if copy else 'replicate', format=format)
def _apply(self, method, *args, format=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
          broadcast : ``_apply(np.broadcast_to, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == 'replicate':
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(self.__class__)
tm._time = TimeJD(jd1, jd2, self.scale, self.precision,
self.in_subfmt, self.out_subfmt, from_jd=True)
# Optional ndarray attributes.
for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location',
'precision', 'in_subfmt', 'out_subfmt'):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only a single element and the method would return a view,
# since in that case nothing would change).
if getattr(val, 'size', 1) > 1:
val = apply_method(val)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(tm.FORMATS)))
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(tm._time.jd1, tm._time.jd2,
tm._time._scale, tm.precision,
tm.in_subfmt, tm.out_subfmt,
from_jd=True)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
        indexed sequentially. If ``keepdims`` is ``True``, the net result is
        the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices``, with its shape expanded
        at ``axis``. If ``keepdims`` is ``False``, the result is the same but
        with the ``axis`` dimension removed from all list entries.
        If ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
return [(indices if i == axis else np.arange(s).reshape(
(1,)*(i if keepdims or i < axis else i-1) + (s,) +
(1,)*(ndim-i-(1 if keepdims or i > axis else 2))))
for i, s in enumerate(self.shape)]
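    # Sketch of the result for a (3, 4)-shaped instance with axis=1 and
    # keepdims=False: argmin returns shape-(3,) indices, and the advanced
    # index built above is [np.arange(3), indices], i.e. the equivalent of
    # arr[np.arange(3), indices].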
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# first get the minimum at normal precision.
jd = self.jd1 + self.jd2
approx = np.min(jd, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (self.jd1 - approx) + self.jd2
return dt.argmin(axis, out)
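    # Why the two-double trick matters (hedged sketch): for offsets well
    # below ~40 microseconds around J2000, jd1 + jd2 collapses to the same
    # float64, but differencing against the approximate minimum first
    # preserves the distinction:
    #
    #     t = Time('2000-01-01') + TimeDelta([0., 1e-7, -1e-7], format='sec')
    #     (t.jd1 + t.jd2).argmin()   # may tie at float64 resolution
    #     t.argmin()                 # resolves correctly to index 2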
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd = self.jd1 + self.jd2
approx = np.max(jd, axis, keepdims=True)
dt = (self.jd1 - approx) + self.jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
jd_approx = self.jd
jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd
if axis is None:
return np.lexsort((jd_remainder.ravel(), jd_approx.ravel()))
else:
return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return (self.max(axis, keepdims=keepdims) -
self.min(axis, keepdims=keepdims))
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis,
keepdims=True)]
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache['scale']
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
cache = self.cache['format']
if attr not in cache:
if attr == self.format:
tm = self
else:
tm = self.replicate(format=attr)
value = tm._shaped_like_input(tm._time.to_value(parent=tm))
cache[attr] = value
return cache[attr]
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError("Cannot convert TimeDelta with "
"undefined scale to any defined scale.")
else:
raise ScaleValueError("Cannot convert {0} with scale "
"'{1}' to scale '{2}'"
.format(self.__class__.__name__,
self.scale, attr))
else:
# Should raise AttributeError
return self.__getattribute__(attr)
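    # Sketch of the dynamic attributes resolved above (hedged):
    #
    #     t = Time('2000-01-01T00:00:00', scale='utc')
    #     t.tai   # scale conversion, computed once and then cached
    #     t.mjd   # format conversion, likewise cached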
@override__dir__
def __dir__(self):
result = set(self.SCALES)
result.update(self.FORMATS)
return result
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError('Attribute shape must match or be '
'broadcastable to that of Time object. '
'Typically, give either a single value or '
'one for each time.')
return val
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : ``astropy.utils.iers.IERS`` table, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. If `None`, use default version (see
``astropy.utils.iers``)
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
            Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True)
>>> status == TIME_BEFORE_IERS_RANGE
array([ True, False]...)
"""
if iers_table is None:
from ..utils.iers import IERS
iers_table = IERS.open()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
        If delta_ut1_utc is not yet set, this will interpolate them from
        the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, '_delta_ut1_utc'):
from ..utils.iers import IERS_Auto
iers_table = IERS_Auto.open()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
            # is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = 'utc'
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == 'ut1':
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, '_delta_tdb_tt'):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ('tt', 'tdb'):
raise ValueError('Accessing the delta_tdb_tt attribute '
'is only possible for TT or TDB time '
'scales')
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
from ..coordinates import EarthLocation
location = EarthLocation.from_geodetic(0., 0., 0.)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1, jd2, ut, lon.to_value(u.radian),
rxy.to_value(u.km), z.to_value(u.km))
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta - something is dealt with in TimeDelta, so we have
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = isinstance(other, TimeDelta)
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
if other_is_delta: # T - Tdelta
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot subtract Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError("Cannot subtract Time instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
self_time = (self._time if self.scale in TIME_DELTA_SCALES
else self.tai._time)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
scale=self_time.scale)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta + something is dealt with in TimeDelta, so we have
# T + Tdelta = T
# T + T = error
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '+')
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot add Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
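    # Sketch of the scale bookkeeping above (hedged): adding a scale-less
    # TimeDelta to a UTC Time does the arithmetic in TAI and converts back:
    #
    #     t = Time('2000-01-01T00:00:00', scale='utc')
    #     t2 = t + TimeDelta(1., format='jd')   # computed in TAI, returned in UTC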
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
        if (self.scale is not None and self.scale not in other.SCALES or
                other.scale is not None and other.scale not in self.SCALES):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError("Cannot compare {0} instances with scales "
"'{1}' and '{2}'".format(self.__class__.__name__,
self.scale, other.scale))
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
def to_datetime(self, timezone=None):
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDelta(Time):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, timedelta) and not format:
format = 'datetime'
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
if format is None:
format = 'jd'
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1, jd2 + offset2, scale,
self.precision, self.in_subfmt,
self.out_subfmt, from_jd=True)
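    # Hedged sketch: converting a TimeDelta between geocentric and
    # barycentric scales only applies the SCALE_OFFSETS factor:
    #
    #     dt = TimeDelta(1., format='jd', scale='tt')
    #     dt_tdb = dt.tdb   # same interval, rescaled to the TDB scale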
def __add__(self, other):
# only deal with TimeDelta + TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
return other.__add__(self)
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
        # the scales should be compatible (e.g., cannot convert TDB to UT1)
        if (self.scale is not None and self.scale not in other.SCALES or
                other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot add TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 + other._time.jd1
jd2 = self._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __sub__(self, other):
# only deal with TimeDelta - TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '-')
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
        # the scales should be compatible (e.g., cannot convert TDB to UT1)
        if (self.scale is not None and self.scale not in other.SCALES or
                other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot subtract TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 - other._time.jd1
jd2 = self._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, '*')
try: # convert to straight float if dimensionless quantity
other = other.to(1)
except Exception:
pass
try:
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
except Exception as err: # try downgrading self to a quantity
try:
return self.to(u.day) * other
except Exception:
raise err
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __div__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
return self.__truediv__(other)
def __rdiv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
return self.__rtruediv__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
        # cannot do __mul__(1./other) as that loses precision
        try:  # convert to straight float if dimensionless quantity
            other = other.to(1)
        except Exception:
            pass
        try:
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
except Exception as err: # try downgrading self to a quantity
try:
return self.to(u.day) / other
except Exception:
raise err
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
return other / self.to(u.day)
def to(self, *args, **kwargs):
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to(*args, **kwargs)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError('cannot convert value to a compatible TimeDelta '
'object: {}'.format(err))
return value
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
val = np.array(val, copy=copy, subok=True)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if not (val.dtype == np.float64 or val.dtype.kind in 'OSUa'):
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
    mask : bool ndarray or None
        Logical-or of masked elements in ``val`` and ``val2``; `None` if
        neither input is masked.
    val, val2 : ndarray
        Filled (plain ndarray) versions of the inputs.
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
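# Sketch of the fill behaviour above (hedged; values illustrative only):
#
#     val = np.ma.array([1., 2., 3.], mask=[False, True, False])
#     mask, filled, _ = _check_for_masked_and_fill(val, np.zeros(3))
#     # mask -> [False, True, False]; filled -> [1., 1., 3.]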
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = '' if op is None else ' for {0}'.format(op)
super().__init__(
"Unsupported operand type(s){0}: "
"'{1}' and '{2}'".format(op_string,
left.__class__.__name__,
right.__class__.__name__))
|
bd457f9fd271892cc682959025b3306d5ff15cd022065d38ced7cbd2d2c4f6fa | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
import numpy as np
from .. import units as u
def day_frac(val1, val2, factor=1., divisor=1.):
"""
Return the sum of ``val1`` and ``val2`` as two float64s, an integer part
and the fractional remainder. If ``factor`` is not 1.0 then multiply the
sum by ``factor``. If ``divisor`` is not 1.0 then divide the sum by
``divisor``.
The arithmetic is all done with exact floating point operations so no
precision is lost to rounding error. This routine assumes the sum is less
than about 1e16, otherwise the ``frac`` part will be greater than 1.0.
Returns
-------
day, frac : float64
Integer and fractional part of val1 + val2.
"""
# Add val1 and val2 exactly, returning the result as two float64s.
# The first is the approximate sum (with some floating point error)
# and the second is the error of the float64 sum.
sum12, err12 = two_sum(val1, val2)
if np.any(factor != 1.):
sum12, carry = two_product(sum12, factor)
carry += err12 * factor
sum12, err12 = two_sum(sum12, carry)
if np.any(divisor != 1.):
q1 = sum12 / divisor
p1, p2 = two_product(q1, divisor)
d1, d2 = two_sum(sum12, -p1)
d2 += err12
d2 -= p2
q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost
sum12, err12 = two_sum(q1, q2)
# get integer fraction
day = np.round(sum12)
extra, frac = two_sum(sum12, -day)
frac += extra + err12
return day, frac
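# Quick numerical sketch of day_frac (values illustrative only):
#
#     day, frac = day_frac(2450000.5, 0.25)
#     # day -> 2450001.0, frac -> -0.25; day + frac is exactly 2450000.75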
def quantity_day_frac(val1, val2=None):
"""Like ``day_frac``, but for quantities with units of time.
The quantities are separately converted to days. Here, we need to take
care with the conversion since while the routines here can do accurate
multiplication, the conversion factor itself may not be accurate. For
instance, if the quantity is in seconds, the conversion factor is
1./86400., which is not exactly representable as a float.
To work around this, for conversion factors less than unity, rather than
multiply by that possibly inaccurate factor, the value is divided by the
conversion factor of a day to that unit (i.e., by 86400. for seconds). For
conversion factors larger than 1, such as 365.25 for years, we do just
multiply. With this scheme, one has precise conversion factors for all
regular time units that astropy defines. Note, however, that it does not
necessarily work for all custom time units, and cannot work when conversion
to time is via an equivalency. For those cases, one remains limited by the
fact that Quantity calculations are done in double precision, not in
quadruple precision as for time.
"""
if val2 is not None:
res11, res12 = quantity_day_frac(val1)
res21, res22 = quantity_day_frac(val2)
        # This summation can at most lose 1 ULP in the second number.
return res11 + res21, res12 + res22
try:
factor = val1.unit.to(u.day)
except Exception:
# Not a simple scaling, so cannot do the full-precision one.
# But at least try normal conversion, since equivalencies may be set.
return val1.to_value(u.day), 0.
if factor >= 1.:
return day_frac(val1.value, 0., factor=factor)
else:
divisor = u.day.to(val1.unit)
return day_frac(val1.value, 0., divisor=divisor)
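# Sketch of the divide-instead-of-multiply scheme (hedged; uses the ``u``
# units module imported above):
#
#     day, frac = quantity_day_frac(86400. * u.s)
#     # factor 1/86400 < 1, so divide by u.day.to(u.s) == 86400. exactly,
#     # giving day -> 1.0, frac -> 0.0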
def two_sum(a, b):
"""
Add ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate sum (with some floating point error)
and the second is the error of the float64 sum.
Using the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
sum, err : float64
Approximate sum of a + b and the exact floating point error
"""
x = a + b
eb = x - a
eb = b - eb
ea = x - b
ea = a - ea
return x, ea + eb
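# Sketch: the error term recovers what float64 addition rounds away
# (1e16 + 1.5 is not exactly representable):
#
#     s, err = two_sum(1e16, 1.5)
#     # s is the rounded float64 sum; the pair (s, err) is exact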
def two_product(a, b):
"""
    Multiply ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate product (with some floating point error)
and the second is the error of the float64 product.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
prod, err : float64
Approximate product a * b and the exact floating point error
"""
x = a * b
ah, al = split(a)
bh, bl = split(b)
y1 = ah * bh
y = x - y1
y2 = al * bh
y -= y2
y3 = ah * bl
y -= y3
y4 = al * bl
y = y4 - y
return x, y
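# Sketch: the error term recovers the rounding error of the product:
#
#     p, err = two_product(1e16, 1. / 3.)
#     # p is the float64 product; the pair (p, err) is exact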
def split(a):
"""
Split float64 in two aligned parts.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
c = 134217729. * a # 2**27+1.
abig = c - a
ah = c - abig
al = a - ah
return ah, al
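# Sketch: the halves have at most ~26 significant bits each, so the
# partial products formed in two_product are exact:
#
#     ah, al = split(1. / 3.)
#     # ah + al == 1. / 3. exactly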
|
2816736dbdd07f3efd418f7d80ba4847835dff419fe25fed18055197300fc8a4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import time
import re
import datetime
from collections import OrderedDict, defaultdict
import numpy as np
from ..utils.decorators import lazyproperty
from .. import units as u
from .. import _erfa as erfa
from .utils import day_frac, quantity_day_frac, two_sum, two_product
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
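# Sketch of the substitution (hedged; the tuple below is an illustrative
# (name, strptime_fmt, output_fmt) triple, not one defined here):
#
#     subfmts = (('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{mday:02d}'),)
#     new = _regexify_subfmts(subfmts)
#     # new[0][1] is now a compiled regex matching strings like '2000-01-01'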
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
class TimeFormat(metaclass=TimeFormatMeta):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
ok1 = val1.dtype == np.double and np.all(np.isfinite(val1))
ok2 = val2 is None or (val2.dtype == np.double and not np.any(np.isinf(val2)))
if not (ok1 and ok2):
raise TypeError('Input values for {0} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
            # seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
        required time scale for this format. In this case, if a scale value
        is provided it must match the class default; if no scale is given,
        the class default is returned.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if hasattr(self.__class__, 'epoch_scale') and scale is None:
scale = self.__class__.epoch_scale
if scale is None:
scale = 'utc' # Default scale as of astropy 0.4
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{0}' not in "
"allowed values {1}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.mask_if_needed(self.value)
@property
def value(self):
raise NotImplementedError
class TimeJD(TimeFormat):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
@property
def value(self):
return self.jd1 + self.jd2
class TimeMJD(TimeFormat):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
# TODO - this routine and vals should be Cythonized to follow the ERFA
# convention of preserving precision by adding to the larger of the two
# values in a vectorized operation. But in most practical cases the
# first one is probably biggest.
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
return (self.jd1 - erfa.DJM0) + self.jd2
class TimeDecimalYear(TimeFormat):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return decimalyear
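# Round-trip sketch for the decimal-year format above (hedged): 2000 is a
# 366-day leap year, so 2000.5 lands 183 days in, matching the docstring:
#
#     t = Time(2000.5, format='decimalyear')
#     t.iso   # -> '2000-07-02 00:00:00.000'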
class TimeFromEpoch(TimeFormat):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,
from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
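        # For instance (illustrative): 1./86400. is not exactly representable,
        # yet its float64 reciprocal is exact, so day_frac can divide by the
        # true 86400:
        #
        #     >>> 1. / (1. / 86400.) == 86400.
        #     True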
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}'"
"to specified scale '{2}', got error:\n{3}"
.format(self.name, self.epoch_scale,
self.scale, err))
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}'"
"to specified scale '{2}', got error:\n{3}"
.format(self.name, self.epoch_scale,
self.scale, err))
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
time_from_epoch = ((jd1 - self.epoch.jd1) +
(jd2 - self.epoch.jd2)) / self.unit
return self.mask_if_needed(time_from_epoch)
value = property(to_value)
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
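# Illustrative usage matching the docstring above (a sketch, not executed
# here):
#
#     >>> from astropy.time import Time
#     >>> Time(946684800.0, format='unix').iso
#     '2000-01-01 00:00:00.000'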
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
    -----
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
    considered as a time scale that runs 19 seconds behind TAI, a fixed offset
(to within about 100 nanoseconds).
For details, see http://tycho.usno.navy.mil/gpstt.html
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
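# Illustrative usage (a sketch, not executed here): per the docstring,
# 630720013.0 is 7300 days of 86400 s plus the 13 leap seconds accumulated
# between 1980-01-06 and 2000-01-01.
#
#     >>> from astropy.time import Time
#     >>> Time(630720013.0, format='gps').utc.iso
#     '2000-01-01 00:00:00.000'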
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {0} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {0} class must be '
'datetime objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok'],
op_dtypes=[object] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None} (optional)
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok'],
op_dtypes=7*[iys.dtype] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity` (optional)
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity` (optional)
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : string, `None` (optional)
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
    This is a reference implementation; it can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for these classes
if val1.dtype.kind not in ('S', 'U'):
raise TypeError('Input values for {0} class must be strings'
.format(self.name))
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name))
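    # Illustrative trace of parse_string (a sketch): the fractional part is
    # split off before strptime is tried, so for the iso subformat
    # '%Y-%m-%d %H:%M:%S' the string '2000-01-02 03:04:05.678' yields
    # [2000, 1, 2, 3, 4, 5.678].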
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
op_dtypes=[val1.dtype] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
        scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
if '{yday:' in str_fmt:
has_yday = True
else:
has_yday = False
yday = None
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs]):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError('No subformats match {0}'.format(pattern))
return subfmts
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
raise ValueError("Time input terminating in 'Z' must have "
"scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]][(SCALE[(REALIZATION)])]".
ISOT with two extensions:
- Can give signed five-digit year (mostly for negative years);
- A possible time scale (and realization) appended in parentheses.
Note: FITS supports some deprecated names for timescales; these are
translated to the formal names upon initialization. Furthermore, any
specific realization information is stored only as long as the time scale
is not changed.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
_fits_scale = None
_fits_realization = None
def parse_string(self, timestr, subfmts):
"""Read time and set scale according to trailing scale codes."""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {0} does not match {1} format'
.format(timestr, self.name))
tm = tm.groupdict()
if tm['scale'] is not None:
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Also store a possible realization,
# so we can round-trip (as long as no scale changes are made).
fits_realization = (tm['realization'].upper()
if tm['realization'] else None)
if self._fits_scale is None:
self._fits_scale = fits_scale
self._fits_realization = fits_realization
if self._scale is None:
self._scale = scale
if (scale != self.scale or fits_scale != self._fits_scale or
fits_realization != self._fits_realization):
raise ValueError("Input strings for {0} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
def format_string(self, str_fmt, **kwargs):
"""Format time-string: append the scale to the normal ISOT format."""
time_str = super().format_string(str_fmt, **kwargs)
if self._fits_scale and self._fits_realization:
return '{0}({1}({2}))'.format(time_str, self._fits_scale,
self._fits_realization)
else:
return '{0}({1})'.format(time_str, self._scale.upper())
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.min() < 1721425.5 or jd.max() >= 5373484.5:
self.out_subfmt = 'long' + self.out_subfmt
return super().value
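# Illustrative usage (a sketch, not executed here): a trailing scale code in
# the string sets the scale and is reproduced on output.
#
#     >>> from astropy.time import Time
#     >>> t = Time('2000-01-01T00:00:00.000(TT)', format='fits')
#     >>> t.scale
#     'tt'
#     >>> t.fits
#     '2000-01-01T00:00:00.000(TT)'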
class TimeEpochDate(TimeFormat):
"""
    Base class to support floating point Besselian and Julian epoch dates
"""
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
return jd_to_epoch(self.jd1, self.jd2)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError('Time {0} does not match {1} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
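# Illustrative usage (a sketch, not executed here): the Julian epoch J2000.0
# is 2000-01-01 12:00:00 TT, i.e. JD 2451545.0.
#
#     >>> from astropy.time import Time
#     >>> Time('J2000.0', format='jyear_str', scale='tt').jd
#     2451545.0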
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
class TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{0}' not in "
"allowed values {1}"
.format(scale, TIME_DELTA_SCALES))
return scale
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
@property
def value(self):
return (self.jd1 + self.jd2) / self.unit
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {0} class must be '
'datetime.timedelta objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None],
flags=['refs_ok'],
op_dtypes=[object] + [np.double])
for val, sec in iterator:
sec[...] = val.item().total_seconds()
self.jd1, self.jd2 = day_frac(iterator.operands[-1], 0.0,
divisor=erfa.DAYSEC)
@property
def value(self):
iterator = np.nditer([self.jd1 + self.jd2, None],
flags=['refs_ok'],
op_dtypes=[self.jd1.dtype] + [object])
for jd, out in iterator:
out[...] = datetime.timedelta(days=jd.item())
return self.mask_if_needed(iterator.operands[-1])
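# This import intentionally lives at the end of the module: .core imports
# this module in turn, so a top-of-file import would be circular.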
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
|
028830a0511fb55440e2e3407093b3359e1650d18190d834f66884532f1f9cba | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from ..utils.decorators import wraps
from ..utils.misc import isiterable
from .core import Unit, UnitsError, add_enabled_equivalencies
from .physical import _unit_physical_mapping
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try: # unit passed in as a string
target_unit = Unit(target)
except ValueError:
try: # See if the function writer specified a physical type
physical_type_id = _unit_physical_mapping[target]
except KeyError: # Function argument target is invalid
raise ValueError("Invalid unit or physical type '{0}'."
.format(target))
# get unit directly from physical type id
target_unit = Unit._from_physical_type_id(physical_type_id)
allowed_units.append(target_unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
allowed_units = _get_allowed_units(targets)
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError("Argument '{0}' to function '{1}' has {2}. "
"You may want to pass in an astropy Quantity instead."
.format(param_name, func_name, error_msg))
else:
if len(targets) > 1:
raise UnitsError("Argument '{0}' to function '{1}' must be in units"
" convertible to one of: {2}."
.format(param_name, func_name,
[str(targ) for targ in targets]))
else:
raise UnitsError("Argument '{0}' to function '{1}' must be in units"
" convertible to '{2}'."
.format(param_name, func_name,
str(targets[0])))
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the decorator,
or by using function annotation syntax. Arguments to the decorator
take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator
or in the annotation.
If the argument has no unit attribute, i.e. it is not a Quantity object, a
        `TypeError` will be raised.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
        supported (i.e. \*args or \**kwargs).
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
        You can also specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if param.name not in bound_args.arguments and param.default is not param.empty:
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
else:
targets = param.annotation
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isiterable(targets):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
if wrapped_signature.return_annotation not in (inspect.Signature.empty, None):
return return_.to(wrapped_signature.return_annotation)
else:
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
|
1e73444fbda523ba9e4c79aa383897c5ca458321b5349d6815ab9a33ff081d10 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from .core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
from .._erfa import ufunc as erfa_ufunc
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
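# Illustrative behaviour (a sketch, not executed here):
#
#     >>> from astropy import units as u
#     >>> get_converter(u.km, u.m)(1.5)    # scale factor of 1000 applied
#     1500.0
#     >>> get_converter(u.m, u.m) is None  # unity scale -> no converter
#     True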
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
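# Illustrative behaviour (a sketch): zeros, infinities and NaN qualify; any
# finite non-zero entry does not.
#
#     >>> can_have_arbitrary_unit(np.array([0., np.inf, np.nan]))
#     True
#     >>> can_have_arbitrary_unit(np.array([0., 1.]))
#     False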
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
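# Illustrative contract (a sketch): for np.absolute on a quantity in metres
# no input conversion is needed and the unit is preserved, i.e.
# helper_invariant(np.absolute, u.m) gives ([None], Unit("m")).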
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from .si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from .si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
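# Illustrative contract (a sketch): the unit algebra mirrors the operation,
# with a missing unit treated as dimensionless.
#
#     >>> from astropy import units as u
#     >>> helper_multiplication(np.multiply, u.m, u.s)
#     ([None, None], Unit("m s"))
#     >>> helper_division(np.divide, u.m, None)
#     ([None, None], Unit("m"))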
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from .si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_degree_to_dimensionless(f, unit):
from .si import degree
try:
return [get_converter(unit, degree)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
from .si import degree, arcmin, arcsec, radian
try:
return [get_converter(unit1, degree),
get_converter(unit2, arcmin),
get_converter(unit3, arcsec)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UFUNC_HELPERS = {}
UNSUPPORTED_UFUNCS = {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
# isnat was introduced in numpy 1.14, gcd+lcm in 1.15
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
# SINGLE ARGUMENT UFUNCS
# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
# ERFA UFUNCS
def helper_s2c(f, unit1, unit2):
from .si import radian
try:
return [get_converter(unit1, radian),
get_converter(unit2, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_s2p(f, unit1, unit2, unit3):
from .si import radian
try:
return [get_converter(unit1, radian),
get_converter(unit2, radian), None], unit3
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_c2s(f, unit1):
from .si import radian
return [None], (radian, radian)
def helper_p2s(f, unit1):
from .si import radian
return [None], (radian, radian, unit1)
UFUNC_HELPERS[erfa_ufunc.s2c] = helper_s2c
UFUNC_HELPERS[erfa_ufunc.s2p] = helper_s2p
UFUNC_HELPERS[erfa_ufunc.c2s] = helper_c2s
UFUNC_HELPERS[erfa_ufunc.p2s] = helper_p2s
UFUNC_HELPERS[erfa_ufunc.pm] = helper_invariant
UFUNC_HELPERS[erfa_ufunc.pdp] = helper_multiplication
UFUNC_HELPERS[erfa_ufunc.pxp] = helper_multiplication
UFUNC_HELPERS[erfa_ufunc.rxp] = helper_multiplication
# UFUNCS FROM SCIPY.SPECIAL
# available ufuncs in this module are at
# https://docs.scipy.org/doc/scipy/reference/special.html
try:
import scipy
import scipy.special as sps
except ImportError:
pass
else:
from ..utils import minversion
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_sps_ufuncs = [
sps.erf, sps.gamma, sps.gammasgn,
sps.psi, sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz,
sps.dawsn, sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2,
sps.exp10, sps.j0, sps.j1, sps.y0, sps.y1, sps.i0, sps.i0e, sps.i1,
sps.i1e, sps.k0, sps.k0e, sps.k1, sps.k1e, sps.itj0y0,
sps.it2j0y0, sps.iti0k0, sps.it2i0k0]
# TODO: Revert https://github.com/astropy/astropy/pull/7219 when astropy
# requires scipy>=0.18.
# See https://github.com/astropy/astropy/issues/7159
if minversion(scipy, "0.18"):
dimensionless_to_dimensionless_sps_ufuncs.append(sps.loggamma)
for ufunc in dimensionless_to_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require input in degrees and give dimensionless output
degree_to_dimensionless_sps_ufuncs = (
sps.cosdg, sps.sindg, sps.tandg, sps.cotdg)
for ufunc in degree_to_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_dimensionless
# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: sps.jv and sps.jn are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
for ufunc in two_arg_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[sps.cbrt] = helper_cbrt
UFUNC_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : Quantity or other ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined above) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
try:
ufunc_helper = UFUNC_HELPERS[function]
except KeyError:
if function in UNSUPPORTED_UFUNCS:
raise TypeError("Cannot use function '{0}' with quantities"
.format(function.__name__))
else:
raise TypeError("Unknown ufunc {0}. Please raise issue on "
"https://github.com/astropy/astropy"
.format(function.__name__))
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for two-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
maybe_arbitrary_arg = args[converters.index(False)]
try:
if can_have_arbitrary_unit(maybe_arbitrary_arg):
converters = [None, None]
else:
raise UnitsError("Can only apply '{0}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {0}: "
"'{1}' and '{2}'"
.format(function.__name__,
args[0].__class__.__name__,
args[1].__class__.__name__))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError("{0} only supported for binary functions"
.format(method))
raise TypeError("Unexpected ufunc method {0}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
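# Illustrative behaviour (a sketch, not executed here): adding metres and
# kilometres converts the second input and keeps the unit of the first.
#
#     >>> from astropy import units as u
#     >>> converters, unit = converters_and_unit(np.add, '__call__',
#     ...                                        1. * u.m, 1. * u.km)
#     >>> converters[0] is None, converters[1](1.)
#     (True, 1000.0)
#     >>> unit
#     Unit("m")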
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{0} in {1} "
"instance".format(
(" from {0} function".format(function.__name__)
if function is not None else ""),
type(output)))
if output.__quantity_subclass__(unit)[0] is not type(output):
raise UnitTypeError(
"Cannot store output with unit '{0}'{1} "
"in {2} instance. Use {3} instance instead."
.format(unit, (" from {0} function".format(function.__name__)
if function is not None else ""), type(output),
output.__quantity_subclass__(unit)[0]))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
output = output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{0}in a non-Quantity instance."
.format("" if function is None else
"resulting from {0} function "
.format(function.__name__)))
# check we can handle the dtype (e.g., that we are not int
# when float is required).
if not np.can_cast(np.result_type(*inputs), output.dtype,
casting='same_kind'):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={0}".format(output.dtype))
return output
|
e688ee26f00bc4f538d3079efb1258d3f65055c9869f8c35619c67c1ddb51c9d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from ..utils.decorators import lazyproperty
from ..utils.exceptions import AstropyWarning
from ..utils.misc import isiterable, InheritDocstrings
from .utils import (is_effectively_unity, sanitize_scale, validate_power,
resolve_fractions)
from . import format as unit_format
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'dimensionless_unscaled', 'one']
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
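# Illustrative only: given the astropy.units.si module and a bare unit,
#
#     _flatten_units_collection([si, some_unit])
#
# returns one flat set containing some_unit plus every UnitBase found
# among si's attributes; non-unit entries are silently skipped.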
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(
"Invalid equivalence entry {0}: {1!r}".format(i, equiv))
if not (funit is Unit(funit) and
(tunit is None or tunit is Unit(tunit)) and
callable(a) and
callable(b)):
raise ValueError(
"Invalid equivalence entry {0}: {1!r}".format(i, equiv))
normalized.append((funit, tunit, a, b))
return normalized
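# For example (illustrative), a 2-tuple equivalency such as (u.m, u.km)
# normalizes to (u.m, u.km, <identity>, <identity>), while a 3-tuple
# (funit, tunit, f) reuses f for both directions.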
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[]):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {k: v.copy() for k, v in
init._by_physical_type.items()}
else:
self._reset_units()
self._reset_equivalencies()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if (st in self._registry and unit != self._registry[st]):
raise ValueError(
"Object with name {0!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them.".format(st))
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(
_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
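# Sketch of the stack discipline (hypothetical session): a _UnitContext
# pushes a fresh registry on entry and pops it on exit, so
#
#     base = get_current_unit_registry()
#     with set_enabled_units([]):           # see set_enabled_units below
#         assert get_current_unit_registry() is not base
#     assert get_current_unit_registry() is base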
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(
equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequences, dicts, or modules containing units, or units
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
    >>> from astropy import units as u
    >>> import numpy as np
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +SKIP
<Quantity -1. +1.22464680e-16j>
"""
# doctest skipped as the complex number formatting changed in numpy 1.14.
#
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of equivalent pairs
E.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
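# A hedged usage sketch, mirroring set_enabled_equivalencies above:
#
#     >>> from astropy import units as u
#     >>> with u.add_enabled_equivalencies(u.dimensionless_angles()):
#     ...     (1. * u.rad).to(u.dimensionless_unscaled)  # doctest: +SKIP
#     <Quantity 1.>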
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch the errors involving scaled units,
which are not recognized by FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
    E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
    instance were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase(metaclass=InheritDocstrings):
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self).encode('unicode_escape')
def __str__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return 'Unit("{0}")'.format(string)
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
unit = self.decompose()
r = zip([x.name for x in unit.bases], unit.powers)
# bases and powers are already sorted in a unique way
# r.sort()
r = tuple(r)
return r
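    # For instance (indicative; ordering comes from decompose()),
    # (u.N / u.m**2)._get_physical_type_id() gives
    # (('kg', 1), ('m', -1), ('s', -2)); the scale is dropped, so any
    # pressure unit yields the same id.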
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. "
"Perhaps you meant to_string()?")
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or `None`
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when ``equivalencies=None``,
in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __div__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rdiv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self**(-1))
except TypeError:
return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, self)
except TypeError:
return NotImplemented
def __hash__(self):
# This must match the hash used in CompositeUnit for a unit
# with only one base and no scale or power.
return hash((str(self.scale), self.name, str('1')))
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return False
# Other is Unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1. or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1. or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : unit object or string or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other = Unit(other, parse_strict='silent')
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if (self._get_physical_type_id() ==
other._get_physical_type_id()):
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other/unit).decompose([a])
return True
except Exception:
pass
else:
                    if ((a._is_equivalent(unit) and b._is_equivalent(other)) or
                            (b._is_equivalent(unit) and a._is_equivalent(other))):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
try:
ratio_in_funit = (other.decompose() /
unit.decompose()).decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string('unscaled')
physical_type = unit.physical_type
if physical_type != 'unknown':
unit_str = "'{0}' ({1})".format(
unit_str, physical_type)
else:
unit_str = "'{0}'".format(unit_str)
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(
"{0} and {1} are not convertible".format(
unit_str, other_str))
def _get_converter(self, other, equivalencies=[]):
other = Unit(other)
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies))
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, 'equivalencies'):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
return lambda v: b(self._get_converter(
tunit, equivalencies=equivalencies)(v))
except Exception:
pass
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
            if (self_decomposed.powers == other_decomposed.powers and
                    all(self_base is other_base for (self_base, other_base)
                        in zip(self_decomposed.bases, other_decomposed.bases))):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(
"'{0!r}' is not a scaled version of '{1!r}'".format(self, other))
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit object or string
The unit to convert to.
value : scalar int or float, or sequence convertible to array, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(other, equivalencies=equivalencies)(value)
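    # Typical call (illustrative):
    #
    #     >>> from astropy import units as u
    #     >>> u.km.to(u.m, [1., 2.])  # doctest: +SKIP
    #     array([1000., 2000.])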
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(
other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : CompositeUnit object
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
cached_results=None):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
for base in unit.bases:
if base not in namespace:
return False
return True
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [set([unit]), set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit ** power
tunit_decomposed = tunit_decomposed ** power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append(
(len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth, depth=depth + 1,
cached_results=cached_results)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append(
(len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
"Cannot represent unit {0} in terms of the given "
"units".format(self))
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(self, equivalencies=[], units=None, max_depth=2,
include_prefix_units=None):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to also list. See
:ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of units to compose to, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (isinstance(tunit, UnitBase) and
(include_prefix_units or
not isinstance(tunit, PrefixUnit)) and
has_bases_in_common_with_equiv(
decomposed, tunit.decompose())):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(
equivalencies=equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(self._compose(
equivalencies=equivalencies, namespace=units,
max_depth=max_depth, depth=0, cached_results={}))
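    # Indicative example, composing a derived unit back into a named one:
    #
    #     >>> (u.N * u.m).compose(units=[u.J])  # doctest: +SKIP
    #     [Unit("J")]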
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
            # If compose.bases has no elements, return 'np.inf' as the
            # score. The exact value does not matter; this case occurs,
            # for instance, for dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
Return the physical type on the unit.
Examples
--------
>>> from astropy import units as u
>>> print(u.m.physical_type)
length
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to also pull options from.
See :ref:`unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(
unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(
unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
def __repr__(self):
if len(self) == 0:
return "[]"
else:
lines = []
for u in self:
irred = u.decompose().to_string()
if irred == u.name:
irred = "irreducible"
lines.append((u.name, irred, ', '.join(u.aliases)))
lines.sort()
lines.insert(0, ('Primary name', 'Unit definition', 'Aliases'))
widths = [0, 0, 0]
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = (lines[0:1] +
['['] +
['{0} ,'.format(x) for x in lines[1:]] +
[']'])
return '\n'.join(lines)
def find_equivalent_units(self, equivalencies=[], units=None,
include_prefix_units=False):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to also list. See
:ref:`unit_equivalencies`.
            Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of units to search in, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies, units=units, max_depth=1,
include_prefix_units=include_prefix_units)
results = set(
x.bases[0] for x in results if len(x.bases) == 1)
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError(
"st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return "{1} ({0})".format(*names[:2])
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
"Object with name {0!r} already exists in "
"given namespace ({1!r}).".format(
name, namespace[name]))
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__dict__)
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1],
_error_check=False)
raise UnitConversionError(
"Unit {0} can not be decomposed into the requested "
"bases".format(self))
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
roundtripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return "UnrecognizedUnit({0})".format(str(self))
def __bytes__(self):
return self.name.encode('ascii', 'replace')
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
"The unit {0!r} is unrecognized, so all arithmetic operations "
"with it are invalid.".format(self.name))
__pow__ = __div__ = __rdiv__ = __truediv__ = __rtruediv__ = __mul__ = \
__rmul__ = __lt__ = __gt__ = __le__ = __ge__ = __neg__ = \
_unrecognized_operator
def __eq__(self, other):
other = Unit(other, parse_strict='silent')
return isinstance(other, UnrecognizedUnit) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
"The unit {0!r} is unrecognized. It can not be converted "
"to other units.".format(self.name))
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(InheritDocstrings):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(self, s, represents=None, format=None, namespace=None,
doc=None, parse_strict='raise'):
# Short-circuit if we're already a unit
if hasattr(s, '_get_physical_type_id'):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
# cannot use _error_check=False: scale may be effectively unity
represents = CompositeUnit(represents.value *
represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode('ascii')
try:
return f.parse(s)
except Exception as e:
if parse_strict == 'silent':
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + ' '
else:
format_clause = ''
msg = ("'{0}' did not parse as {1}unit: {2}"
.format(s, format_clause, str(e)))
if parse_strict == 'raise':
raise ValueError(msg)
elif parse_strict == 'warn':
warnings.warn(msg, UnitsWarning)
else:
raise ValueError("'parse_strict' must be 'warn', "
"'raise' or 'silent'")
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [])
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError("{0} can not be converted to a Unit".format(s))
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
    There are a number of different ways to construct a Unit, but each
    of them returns a `UnitBase` instance.  If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
Unit(s, format=None, parse_strict='silent')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From `None`::
Unit()
Returns the null unit.
    - The last form, which creates a new `Unit`, is described in detail
below.
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dictionary, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, represents=None, doc=None,
format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc,
format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
return hash(self.name) + hash(self._represents)
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
def __init__(self, scale, bases, powers, decompose=False,
decompose_bases=set(), _error_check=True):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
scale = sanitize_scale(scale)
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError(
"bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
self._scale = scale
self._bases = bases
self._powers = powers
self._decomposed_cache = None
self._expand_and_gather(decompose=decompose, bases=decompose_bases)
self._hash = None
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return 'Unit(dimensionless with a scale of {0})'.format(
self._scale)
else:
return 'Unit(dimensionless)'
def __hash__(self):
if self._hash is None:
parts = ([str(self._scale)] +
[x.name for x in self._bases] +
[str(x) for x in self._powers])
self._hash = hash(tuple(parts))
return self._hash
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale ** p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', '')))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
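    # Sketch (indicative values): CompositeUnit(1, [u.m, u.cm], [1, 1],
    # decompose=True) gathers to scale 0.01 with bases [m] and powers [2],
    # because cm decomposes to 0.01 m before like bases are merged.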
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if (not isinstance(base, IrreducibleUnit) or
(len(bases) and base not in bases)):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True,
decompose_bases=bases)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
si_prefixes = [
(['Y'], ['yotta'], 1e24),
(['Z'], ['zetta'], 1e21),
(['E'], ['exa'], 1e18),
(['P'], ['peta'], 1e15),
(['T'], ['tera'], 1e12),
(['G'], ['giga'], 1e9),
(['M'], ['mega'], 1e6),
(['k'], ['kilo'], 1e3),
(['h'], ['hecto'], 1e2),
(['da'], ['deka', 'deca'], 1e1),
(['d'], ['deci'], 1e-1),
(['c'], ['centi'], 1e-2),
(['m'], ['milli'], 1e-3),
(['u'], ['micro'], 1e-6),
(['n'], ['nano'], 1e-9),
(['p'], ['pico'], 1e-12),
(['f'], ['femto'], 1e-15),
(['a'], ['atto'], 1e-18),
(['z'], ['zepto'], 1e-21),
(['y'], ['yocto'], 1e-24)
]
binary_prefixes = [
(['Ki'], ['kibi'], 2. ** 10),
(['Mi'], ['mebi'], 2. ** 20),
(['Gi'], ['gibi'], 2. ** 30),
(['Ti'], ['tebi'], 2. ** 40),
(['Pi'], ['pebi'], 2. ** 50),
(['Ei'], ['exbi'], 2. ** 60)
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == 'u':
format['latex'] = r'\mu ' + u.get_format_name('latex')
format['unicode'] = 'μ' + u.get_format_name('unicode')
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(names, CompositeUnit(factor, [u], [1],
_error_check=False),
namespace=namespace, format=format)
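# Sketch with a throwaway namespace (unit names invented):
#
#     ns = {}
#     jot = IrreducibleUnit(['jot'], namespace=ns)
#     _add_prefixes(jot, excludes=['da'], namespace=ns, prefixes=True)
#     # ns now also holds 'kjot', 'mjot', 'ujot', ..., but no 'dajot'.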
def def_unit(s, represents=None, doc=None, format=None, prefixes=False,
exclude_prefixes=[], namespace=None):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
        unit as well. For example, for a given unit ``m``, this will
        generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
            (short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `UnitBase` object
The newly-defined unit, or a matching unit that was already
defined.
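    Examples
    --------
    A minimal usage sketch (``ns`` stands in for a real module namespace,
    and ``knot`` is just an illustrative unit)::
        >>> import astropy.units as u
        >>> ns = {}
        >>> knot = u.def_unit('knot', 1.852 * u.km / u.h, namespace=ns)
        >>> (2 * knot).to_value(u.km / u.h)  # doctest: +FLOAT_CMP
        3.704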
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc,
format=format)
else:
result = IrreducibleUnit(
s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(result, excludes=exclude_prefixes, namespace=namespace,
prefixes=prefixes)
return result
def _condition_arg(value):
"""
Validate value is acceptable for conversion purposes.
    If the value is not a scalar, it will be converted into an array,
    provided such a conversion is possible.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
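    Examples
    --------
    Illustrative sketch::
        >>> _condition_arg(3.5)
        3.5
        >>> _condition_arg([1, 2])
        array([1, 2])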
"""
if isinstance(value, (float, int, complex)):
return value
if isinstance(value, np.ndarray) and value.dtype.kind in ['i', 'f', 'c']:
return value
avalue = np.array(value)
if avalue.dtype.kind not in ['i', 'f', 'c']:
raise ValueError("Value not scalar compatible or convertible to "
"an int, float, or complex array")
return avalue
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
09bf3d2decccf7aaa41bb68f61760fb564add6320756c419e1ff008c27447b10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
import numbers
import io
import re
from fractions import Fraction
import numpy as np
from numpy import finfo
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace('\n', ' ')
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in namespace.items():
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ''
if isinstance(unit, core.Unit):
represents = ":math:`{0}`".format(
unit._represents.to_string('latex')[1:-1])
aliases = ', '.join('``{0}``'.format(x) for x in unit.aliases)
yield (unit, doc, represents, aliases, 'Yes' if unit.name in has_prefixes else 'No')
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
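    Examples
    --------
    A sketch of the intended use, appending the summary to a unit-definition
    module's docstring (``si`` is the SI definitions module)::
        >>> from astropy.units import si
        >>> summary = generate_unit_summary(si.__dict__)
        >>> '.. list-table:: Available Units' in summary
        True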
"""
docstring = io.StringIO()
docstring.write("""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
""")
for unit_summary in _iter_unit_summary(namespace):
docstring.write("""
* - ``{0}``
- {1}
- {2}
- {3}
- {4}
""".format(*unit_summary))
return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace):
"""
Generates table entries for units in a namespace that are just prefixes
without the base unit. Note that this is intended to be used *after*
`generate_unit_summary` and therefore does not include the table header.
Parameters
----------
namespace : dict
A namespace containing units that are prefixes but do *not* have the
base unit in their namespace.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
from . import PrefixUnit
faux_namespace = {}
for nm, unit in namespace.items():
if isinstance(unit, PrefixUnit):
base_unit = unit.represents.bases[0]
faux_namespace[base_unit.name] = base_unit
docstring = io.StringIO()
for unit_summary in _iter_unit_summary(faux_namespace):
docstring.write("""
* - Prefixes for ``{0}``
- {1} prefixes
- {2}
- {3}
- Only
""".format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
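    """Check whether a scale factor equals unity to within calculation
    precision (a few float epsilon on either side of 1). Sketch::
        >>> is_effectively_unity(1.0 + 1.e-16)
        True
        >>> is_effectively_unity(1.5)
        False
    """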
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
_JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)
def sanitize_scale(scale):
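    """Canonicalize a scale factor: values effectively equal to unity
    become exactly 1.0, and complex scales with a negligible real or
    imaginary component are reduced to the dominant component.
    """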
if is_effectively_unity(scale):
return 1.0
if np.iscomplex(scale): # scale is complex
if scale == 0.0:
return 0.0
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag/scale.real + 1):
scale = scale.real
else:
if is_effectively_unity(scale.real/scale.imag + 1):
scale = complex(0., scale.imag)
return scale
def validate_power(p, support_tuples=False):
"""Convert a power to a floating point value, an integer, or a Fraction.
If a fractional power can be represented exactly as a floating point
number, convert it to a float, to make the math much faster; otherwise,
retain it as a `fractions.Fraction` object to avoid losing precision.
Conversely, if the value is indistinguishable from a rational number with a
low-numbered denominator, convert to a Fraction object.
Parameters
----------
p : float, int, Rational, Fraction
Power to be converted
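    Examples
    --------
    Illustrative sketch of the three possible return types::
        >>> validate_power(2.0)
        2
        >>> validate_power(0.25)
        0.25
        >>> validate_power(1./3.)
        Fraction(1, 3)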
"""
denom = getattr(p, 'denominator', None)
if denom is None:
try:
p = float(p)
except Exception:
if not np.isscalar(p):
raise ValueError("Quantities and Units may only be raised "
"to a scalar power")
else:
raise
if (p % 1.0) == 0.0:
# Denominators of 1 can just be integers.
p = int(p)
elif (p * 8.0) % 1.0 == 0.0:
# Leave alone if the denominator is exactly 2, 4 or 8, since this
# can be perfectly represented as a float, which means subsequent
# operations are much faster.
pass
else:
# Convert floats indistinguishable from a rational to Fraction.
# Here, we do not need to test values that are divisors of a higher
# number, such as 3, since it is already addressed by 6.
for i in (10, 9, 7, 6):
scaled = p * float(i)
if((scaled + 4. * _float_finfo.eps) % 1.0 <
8. * _float_finfo.eps):
p = Fraction(int(round(scaled)), i)
break
elif denom == 1:
p = int(p.numerator)
elif (denom & (denom - 1)) == 0:
# Above is a bit-twiddling hack to see if denom is a power of two.
p = float(p)
return p
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction.
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
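    Examples
    --------
    Illustrative sketch::
        >>> resolve_fractions(Fraction(1, 3), 2)
        (Fraction(1, 3), Fraction(2, 1))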
"""
a_is_fraction = isinstance(a, Fraction)
b_is_fraction = isinstance(b, Fraction)
if a_is_fraction and not b_is_fraction:
b = Fraction(b)
elif not a_is_fraction and b_is_fraction:
a = Fraction(a)
return a, b
def quantity_asanyarray(a, dtype=None):
from .quantity import Quantity
if not isinstance(a, np.ndarray) and not np.isscalar(a) and any(isinstance(x, Quantity) for x in a):
return Quantity(a, dtype=dtype)
else:
return np.asanyarray(a, dtype=dtype)
|
e83939856bb2eef50ab5c832c08a84e93945fcb37abc24233ce6b5e39456db71 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from ..utils.compat import NUMPY_LT_1_14
from ..utils.compat.misc import override__dir__
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils.misc import isiterable, InheritDocstrings
from ..utils.data_info import ParentDtypeInfo
from .. import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return '{0.value:}'.format(val)
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Quantity (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class Quantity(np.ndarray, metaclass=InheritDocstrings):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See http://docs.astropy.org/en/latest/units/
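    Examples
    --------
    A sketch of the two equivalent construction styles (reprs shown for
    numpy >= 1.14)::
        >>> from astropy import units as u
        >>> u.Quantity(15., u.m / u.s)
        <Quantity 15. m / s>
        >>> 15. * u.m / u.s
        <Quantity 15. m / s>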
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if dtype is None:
if not copy:
return value
if not np.can_cast(np.float32, value.dtype):
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
                # the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{0}" as a {1}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif (isiterable(value) and len(value) > 0 and
all(isinstance(v, Quantity) for v in value)):
# Convert all quantities to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {0!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{1}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=False, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(() if value.ndim == 0 else 0),
numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and (not np.can_cast(np.float32, value.dtype)
or value.dtype.kind == 'O'):
value = value.astype(float)
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more, since we require '
'numpy >=1.13. Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
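        Examples
        --------
        A sketch of the behaviour this method enables (repr shown for
        numpy >= 1.14)::
            >>> import numpy as np
            >>> import astropy.units as u
            >>> np.add(1. * u.km, 500. * u.m)
            <Quantity 1.5 km>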
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
# Same for inputs, but here also convert if necessary.
arrays = [(converter(input_.value) if converter else
getattr(input_, 'value', input_))
for input_, converter in zip(inputs, converters)]
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : `~numpy.ndarray` or tuple of `~numpy.ndarray`
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit` or None
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
return tuple(self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : `UnitBase`, or anything convertible to a :class:`~astropy.units.Unit`, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : Quantity subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
else:
# In principle, could gain time by testing unit is self.unit
# as well, and then quantity_subclass = self.__class__, but
# Constant relies on going through `__quantity_subclass__`.
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the ``np.array`` call takes only
        # about twice as long as the check ``obj.__class__ is np.ndarray``
        # itself, so it is not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
        initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, UnitBase):
raise UnitTypeError(
"{0} instances require {1} units, not {2} instances."
.format(type(self).__name__, UnitBase, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
def to(self, unit, equivalencies=[]):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
See also
--------
to_value : get the numerical value in a given unit.
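        Examples
        --------
        A minimal sketch (repr shown for numpy >= 1.14)::
            >>> import astropy.units as u
            >>> (1. * u.km).to(u.m)
            <Quantity 1000. m>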
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
return self._new_view(self._to_value(unit, equivalencies), unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If not provided or
``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : `~numpy.ndarray` or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
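        Examples
        --------
        A minimal sketch::
            >>> import astropy.units as u
            >>> (1. * u.km).to_value(u.m)
            1000.0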
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
else:
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
return value if self.shape else (value[()] if self.dtype.fields
else value.item())
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
"'{0}' object has no '{1}' member".format(
self.__class__.__name__,
attr))
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
"{0} instance has no attribute '{1}'".format(
self.__class__.__name__, attr))
else:
return value
# Equality (return False if units do not match) needs to be handled
# explicitly for numpy >=1.9, since it no longer traps errors.
def __eq__(self, other):
try:
try:
return super().__eq__(other)
except DeprecationWarning:
# We treat the DeprecationWarning separately, since it may
# mask another Exception. But we do not want to just use
# np.equal, since super's __eq__ treats recarrays correctly.
return np.equal(self, other)
except UnitsError:
return False
except TypeError:
return NotImplemented
def __ne__(self, other):
try:
try:
return super().__ne__(other)
except DeprecationWarning:
return np.not_equal(self, other)
except UnitsError:
return True
except TypeError:
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __div__(self, other):
""" Division between `Quantity` objects. """
return self.__truediv__(other)
    def __idiv__(self, other):
        """ In-place division between `Quantity` objects. """
        return self.__itruediv__(other)
    def __rdiv__(self, other):
        """ Right division between `Quantity` objects. """
        return self.__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
# For Py>=3.5
def __matmul__(self, other, reverse=False):
result_unit = self.unit * getattr(other, 'unit', dimensionless_unscaled)
result_array = np.matmul(self.value, getattr(other, 'value', other))
return self._new_view(result_array, result_unit)
def __rmatmul__(self, other):
result_unit = self.unit * getattr(other, 'unit', dimensionless_unscaled)
result_array = np.matmul(getattr(other, 'value', other), self.value)
return self._new_view(result_array, result_unit)
# In numpy 1.13, 1.14, a np.positive ufunc exists, but ndarray.__pos__
# does not go through it, so we define it, to allow subclasses to override
# it inside __array_ufunc__. This can be removed if a solution to
# https://github.com/numpy/numpy/issues/9081 is merged.
def __pos__(self):
"""Plus the quantity."""
return np.positive(self)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
# Display
# TODO: we may want to add a hook for dimensionless quantities?
def __str__(self):
return '{0}{1:s}'.format(self.value, self._unitstr)
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
sep = ',' if NUMPY_LT_1_14 else ', '
arrstr = np.array2string(self.view(np.ndarray), separator=sep,
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# need to do try/finally because "threshold" cannot be overridden
# with array2string
pops = np.get_printoptions()
format_spec = '.{}g'.format(pops['precision'])
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
def complex_formatter(value):
return '({0}{1}i)'.format(
Latex.format_exponential_notation(value.real,
format_spec=format_spec),
Latex.format_exponential_notation(value.imag,
format_spec='+' + format_spec))
try:
formatter = {'float_kind': float_formatter,
'complex_kind': complex_formatter}
if conf.latex_array_threshold > -1:
np.set_printoptions(threshold=conf.latex_array_threshold,
formatter=formatter)
# the view is needed for the scalar case - value might be float
if NUMPY_LT_1_14: # style deprecated in 1.14
latex_value = np.array2string(
self.view(np.ndarray),
style=(float_formatter if self.dtype.kind == 'f'
else complex_formatter if self.dtype.kind == 'c'
else repr),
max_line_width=np.inf, separator=',~')
else:
latex_value = np.array2string(
self.view(np.ndarray),
max_line_width=np.inf, separator=',~')
latex_value = latex_value.replace('...', r'\dots')
finally:
np.set_printoptions(**pops)
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode
if self.unit is not None
else _UNIT_NOT_INITIALISED)
return r'${0} \; {1}$'.format(latex_value, latex_unit)
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
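        Examples
        --------
        A minimal sketch::
            >>> import astropy.units as u
            >>> '{0:.2f}'.format(1.2345 * u.m)
            '1.23 m'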
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format("{0}{1:s}".format(value, self._unitstr),
full_format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
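        Examples
        --------
        A minimal sketch::
            >>> import astropy.units as u
            >>> (10. * u.km / u.h).decompose()  # doctest: +FLOAT_CMP
            <Quantity 2.77777778 m / s>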
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.list()")
def _to_own_unit(self, value, check_precision=True):
try:
_value = value.to_value(self.unit)
except AttributeError:
# We're not a Quantity, so let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
_value = Quantity(value).to_value(self.unit)
except UnitsError as exc:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(value)):
_value = value
else:
raise exc
if check_precision:
value_dtype = getattr(value, 'dtype', None)
if self.dtype != value_dtype:
self_dtype_array = np.array(_value, self.dtype)
value_dtype_array = np.array(_value, dtype=value_dtype,
copy=False)
if not np.all(np.logical_or(self_dtype_array ==
value_dtype_array,
np.isnan(value_dtype_array))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# take, repeat, sort, compress, diagonal OK
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof)
def mean(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.mean, axis, dtype, out=out)
def ptp(self, axis=None, out=None):
return self._wrap_function(np.ptp, axis, out=out)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out,
keepdims=keepdims)
def prod(self, axis=None, dtype=None, out=None, keepdims=False):
if not self.unit.is_unity():
raise ValueError("cannot use prod on scaled or "
"non-dimensionless Quantity arrays")
return self._wrap_function(np.prod, axis, dtype, out=out,
keepdims=keepdims)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if not self.unit.is_unity():
raise ValueError("cannot use cumprod on scaled or "
"non-dimensionless Quantity arrays")
return self._wrap_function(np.cumprod, axis, dtype, out=out)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
    # without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{0} instances require units equivalent to '{1}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
", so cannot set it to '{0}'.".format(unit)))
super()._set_unit(unit)
def isclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
    Return True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`.
"""
return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
    Return True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`.
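    Examples
    --------
    Illustrative sketch (``atol`` must be convertible to the unit of ``a``)::
        >>> import astropy.units as u
        >>> allclose(1001. * u.m, 1. * u.km, atol=2. * u.m)
        True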
"""
return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(desired.unit, actual.unit))
if atol is None:
# by default, we assume an absolute tolerance of 0
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(atol.unit, actual.unit))
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
284d1ad1070ed67798ef078a87bfafbc253895231cfc52ab1c9dd5ffdbf4b0c8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""
from urllib.parse import urlparse
from collections import OrderedDict
import numpy as np
from .sky_coordinate import SkyCoord
from ..utils.data import download_file
from ..utils.decorators import classproperty
from ..utils.state import ScienceState
from ..utils import indent
from .. import units as u
from .. import _erfa as erfa
from ..constants import c as speed_of_light
from .representation import CartesianRepresentation
from .orbital_elements import calc_moon
from .builtin_frames import GCRS, ICRS
from .builtin_frames.utils import get_jd12
__all__ = ["get_body", "get_moon", "get_body_barycentric",
"get_body_barycentric_posvel", "solar_system_ephemeris"]
DEFAULT_JPL_EPHEMERIS = 'de430'
"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = OrderedDict(
(('sun', [(0, 10)]),
('mercury', [(0, 1), (1, 199)]),
('venus', [(0, 2), (2, 299)]),
('earth-moon-barycenter', [(0, 3)]),
('earth', [(0, 3), (3, 399)]),
('moon', [(0, 3), (3, 301)]),
('mars', [(0, 4)]),
('jupiter', [(0, 5)]),
('saturn', [(0, 6)]),
('uranus', [(0, 7)]),
('neptune', [(0, 8)]),
('pluto', [(0, 9)]))
)
"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = OrderedDict(
(('mercury', 1),
('venus', 2),
('earth-moon-barycenter', 3),
('mars', 4),
('jupiter', 5),
('saturn', 6),
('uranus', 7),
('neptune', 8)))
_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files. To change
the default to be the JPL ephemeris::
>>> from astropy.coordinates import solar_system_ephemeris
>>> solar_system_ephemeris.set('jpl') # doctest: +SKIP
Use of any JPL ephemeris requires the jplephem package
(https://pypi.python.org/pypi/jplephem).
If needed, the ephemeris file will be downloaded (and cached).
One can check which bodies are covered by a given ephemeris using::
>>> solar_system_ephemeris.bodies
('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[1:-1]
class solar_system_ephemeris(ScienceState):
"""Default ephemerides for calculating positions of Solar-System bodies.
This can be one of the following::
- 'builtin': polynomial approximations to the orbital elements.
- 'de430' or 'de432s': short-cuts for recent JPL dynamical models.
- 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
- URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
- `None`: Ensure an Exception is raised without an explicit ephemeris.
The default is 'builtin', which uses the ``epv00`` and ``plan94``
routines from the ``erfa`` implementation of the Standards Of Fundamental
Astronomy library.
Notes
-----
Any file required will be downloaded (and cached) when the state is set.
The default Spacecraft and Planet Kernel (SPK) file from NASA JPL (de430) is
~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is
~10MB, and covers years 1950-2050 [2]_. Older versions of the JPL
ephemerides (such as the widely used de200) can be used via their URL [3]_.
.. [1] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
.. [2] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
.. [3] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
"""
_value = 'builtin'
_kernel = None
@classmethod
def validate(cls, value):
# make no changes if value is None
if value is None:
return cls._value
# Set up Kernel; if the file is not in cache, this will download it.
cls.get_kernel(value)
return value
@classmethod
def get_kernel(cls, value):
# ScienceState only ensures the `_value` attribute is up to date,
# so we need to be sure any kernel returned is consistent.
if cls._kernel is None or cls._kernel.origin != value:
if cls._kernel is not None:
cls._kernel.daf.file.close()
cls._kernel = None
kernel = _get_kernel(value)
if kernel is not None:
kernel.origin = value
cls._kernel = kernel
return cls._kernel
@classproperty
def kernel(cls):
return cls.get_kernel(cls._value)
@classproperty
def bodies(cls):
if cls._value is None:
return None
if cls._value.lower() == 'builtin':
return (('earth', 'sun', 'moon') +
tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()))
else:
return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())
def _get_kernel(value):
"""
Try importing jplephem and, if needed, download or retrieve from the cache
the Spacecraft and Planet Kernel (SPK) corresponding to the given ephemeris.
"""
if value is None or value.lower() == 'builtin':
return None
if value.lower() == 'jpl':
value = DEFAULT_JPL_EPHEMERIS
if value.lower() in ('de430', 'de432s'):
value = ('http://naif.jpl.nasa.gov/pub/naif/generic_kernels'
'/spk/planets/{:s}.bsp'.format(value.lower()))
else:
try:
urlparse(value)
except Exception:
raise ValueError('{} was not one of the standard strings and '
'could not be parsed as a URL'.format(value))
try:
from jplephem.spk import SPK
except ImportError:
raise ImportError("Solar system JPL ephemeris calculations require "
"the jplephem package "
"(https://pypi.python.org/pypi/jplephem)")
return SPK.open(download_file(value, cache=True))
def _get_body_barycentric_posvel(body, time, ephemeris=None,
get_velocity=True):
"""Calculate the barycentric position (and velocity) of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
get_velocity : bool, optional
Whether or not to calculate the velocity as well as the position.
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation` or tuple
Barycentric (ICRS) position or tuple of position and velocity.
Notes
-----
No velocity can be calculated with the built-in ephemeris for the Moon.
Whether or not velocities are calculated makes little difference for the
built-in ephemerides, but for most JPL ephemeris files, the execution time
roughly doubles.
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
if ephemeris is None:
raise ValueError(_EPHEMERIS_NOTE)
kernel = solar_system_ephemeris.kernel
else:
kernel = _get_kernel(ephemeris)
jd1, jd2 = get_jd12(time, 'tdb')
if kernel is None:
body = body.lower()
earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
if body == 'earth':
body_pv_bary = earth_pv_bary
elif body == 'moon':
if get_velocity:
raise KeyError("the Moon's velocity cannot be calculated with "
"the '{0}' ephemeris.".format(ephemeris))
return calc_moon(time).cartesian
else:
sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
if body == 'sun':
body_pv_bary = sun_pv_bary
else:
try:
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
except KeyError:
raise KeyError("{0}'s position and velocity cannot be "
"calculated with the '{1}' ephemeris."
.format(body, ephemeris))
body_pv_helio = erfa.plan94(jd1, jd2, body_index)
body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)
body_pos_bary = CartesianRepresentation(
body_pv_bary['p'], unit=u.au, xyz_axis=-1, copy=False)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_pv_bary['v'], unit=u.au/u.day, xyz_axis=-1,
copy=False)
else:
if isinstance(body, str):
# Look up kernel chain for JPL ephemeris, based on name
try:
kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
except KeyError:
raise KeyError("{0}'s position cannot be calculated with "
"the {1} ephemeris.".format(body, ephemeris))
else:
# otherwise, assume the user knows what they're doing and intentionally
# passed in a kernel chain
kernel_spec = body
# jplephem cannot handle multi-D arrays, so convert to 1D here.
jd1_shape = getattr(jd1, 'shape', ())
if len(jd1_shape) > 1:
jd1, jd2 = jd1.ravel(), jd2.ravel()
# Note that we use the new jd1.shape here to create a 1D result array.
# It is reshaped below.
body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) +
getattr(jd1, 'shape', ()))
for pair in kernel_spec:
spk = kernel[pair]
if spk.data_type == 3:
# Type 3 kernels contain both position and velocity.
posvel = spk.compute(jd1, jd2)
if get_velocity:
body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
else:
# position is the first three of the six position+velocity components
body_posvel_bary[0] += posvel[:3]
else:
# spk.generate first yields the position and then the
# derivative. If no velocities are desired, body_posvel_bary
# has only one element and thus the loop ends after a single
# iteration, avoiding the velocity calculation.
for body_p_or_v, p_or_v in zip(body_posvel_bary,
spk.generate(jd1, jd2)):
body_p_or_v += p_or_v
body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
body_pos_bary = CartesianRepresentation(body_posvel_bary[0],
unit=u.km, copy=False)
if get_velocity:
body_vel_bary = CartesianRepresentation(body_posvel_bary[1],
unit=u.km/u.day, copy=False)
return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
The velocity cannot be calculated for the Moon. To just get the position,
use :func:`~astropy.coordinates.get_body_barycentric`.
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
get_body_barycentric_posvel.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_body_barycentric(body, time, ephemeris=None):
"""Calculate the barycentric position of a solar system body.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) position of the body in cartesian coordinates
See also
--------
get_body_barycentric_posvel : to calculate both position and velocity.
Notes
-----
"""
return _get_body_barycentric_posvel(body, time, ephemeris,
get_velocity=False)
get_body_barycentric.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def _get_apparent_body_position(body, time, ephemeris):
"""Calculate the apparent position of body ``body`` relative to Earth.
This corrects for the light-travel time to the object.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``~astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
cartesian_position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) apparent position of the body in cartesian coordinates
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
# The builtin ephemeris for the Moon is a special case: there is no need
# to account for light travel time, since this is already included in the
# Meeus algorithm used.
if ephemeris == 'builtin' and body.lower() == 'moon':
return get_body_barycentric(body, time, ephemeris)
# Calculate position given approximate light travel time.
delta_light_travel_time = 20. * u.s
emitted_time = time
light_travel_time = 0. * u.s
earth_loc = get_body_barycentric('earth', time, ephemeris)
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s):
body_loc = get_body_barycentric(body, emitted_time, ephemeris)
earth_distance = (body_loc - earth_loc).norm()
delta_light_travel_time = (light_travel_time -
earth_distance/speed_of_light)
light_travel_time = earth_distance/speed_of_light
emitted_time = time - light_travel_time
return get_body_barycentric(body, emitted_time, ephemeris)
_get_apparent_body_position.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or list of tuple
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
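Examples
--------
A minimal sketch; the default 'builtin' ephemeris needs no data files
(output skipped)::
>>> from astropy.time import Time
>>> from astropy.coordinates import get_body
>>> t = Time('2015-03-20 12:00:00')
>>> get_body('mars', t)  # doctest: +SKIP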
Notes
-----
"""
if location is None:
location = time.location
cartrep = _get_apparent_body_position(body, time, ephemeris)
icrs = ICRS(cartrep)
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
gcrs = icrs.transform_to(GCRS(obstime=time,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
else:
gcrs = icrs.transform_to(GCRS(obstime=time))
return SkyCoord(gcrs)
get_body.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_moon(time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If none is supplied, taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the Moon
Notes
-----
"""
return get_body('moon', time, location=location, ephemeris=ephemeris)
get_moon.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def _apparent_position_in_true_coordinates(skycoord):
"""
Convert a SkyCoord in the GCRS frame into one in which RA and Dec
are defined w.r.t. the true equinox and poles of the Earth
"""
jd1, jd2 = get_jd12(skycoord.obstime, 'tt')
_, _, _, _, _, _, _, rbpn = erfa.pn00a(jd1, jd2)
return SkyCoord(skycoord.frame.realize_frame(
skycoord.cartesian.transform(rbpn)))
|
4bb9148c92a7708adb7adc544c5fef716ff4bf91c8788c3f9112f144e20a0bec | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for matching coordinate catalogs.
"""
import numpy as np
from .representation import UnitSphericalRepresentation
from .. import units as u
from . import Angle
__all__ = ['match_coordinates_3d', 'match_coordinates_sky', 'search_around_3d',
'search_around_sky']
def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'):
"""
Finds the nearest 3-dimensional matches of a coordinate or coordinates in
a set of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in either ``matchcoord``
or ``catalogcoord``.
Parameters
----------
matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate(s) to match to the catalog.
catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The base catalog in which to search for matches. Typically this will
be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is desired here,
as that is correct for matching one set of coordinates to another.
The next likely use case is ``2``, for matching a coordinate catalog
against *itself* (``1`` is inappropriate because each point will find
itself as the closest match).
storekdtree : bool or str, optional
If a string, will store the KD-Tree used for the computation
in the ``catalogcoord``, in ``catalogcoord.cache`` with the
provided name. This dramatically speeds up subsequent calls with the
same catalog. If False, the KD-Tree is discarded after use.
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for each
``matchcoord``. Shape matches ``matchcoord``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each ``matchcoord``
and the ``matchcoord``. Shape matches ``matchcoord``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each ``matchcoord`` and
the ``matchcoord``. Shape matches ``matchcoord``.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ to be installed
or it will fail.
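Examples
--------
A hedged sketch of matching with distances (requires SciPy; output
skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord, match_coordinates_3d
>>> catalog = SkyCoord([10, 20] * u.deg, [40, 50] * u.deg,
...                    distance=[1, 2] * u.kpc)
>>> target = SkyCoord(10.1 * u.deg, 40.2 * u.deg, distance=1.05 * u.kpc)
>>> idx, sep2d, dist3d = match_coordinates_3d(target, catalog)  # doctest: +SKIP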
"""
if catalogcoord.isscalar or len(catalogcoord) < 1:
raise ValueError('The catalog for coordinate matching cannot be a '
'scalar or length-0.')
kdt = _get_cartesian_kdtree(catalogcoord, storekdtree)
# make sure coordinate systems match
matchcoord = matchcoord.transform_to(catalogcoord)
# make sure units match
catunit = catalogcoord.cartesian.x.unit
matchxyz = matchcoord.cartesian.xyz.to(catunit)
matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3))
dist, idx = kdt.query(matchflatxyz.T, nthneighbor)
if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise
dist = dist[:, -1]
idx = idx[:, -1]
sep2d = catalogcoord[idx].separation(matchcoord)
return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit
def match_coordinates_sky(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_sky'):
"""
Finds the nearest on-sky matches of a coordinate or coordinates in
a set of catalog coordinates.
This finds the on-sky closest neighbor, which is only different from the
3-dimensional match if ``distance`` is set in either ``matchcoord``
or ``catalogcoord``.
Parameters
----------
matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate(s) to match to the catalog.
catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The base catalog in which to search for matches. Typically this will
be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is desired here,
as that is correct for matching one set of coordinates to another.
The next likely use case is ``2``, for matching a coordinate catalog
against *itself* (``1`` is inappropriate because each point will find
itself as the closest match).
storekdtree : bool or str, optional
If a string, will store the KD-Tree used for the computation
in the ``catalogcoord`` in ``catalogcoord.cache`` with the
provided name. This dramatically speeds up subsequent calls with the
same catalog. If False, the KD-Tree is discarded after use.
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for each
``matchcoord``. Shape matches ``matchcoord``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each ``matchcoord`` and
the ``matchcoord``. Shape matches ``matchcoord``. If either
``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D
distance on the unit sphere, rather than a true distance.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ to be installed
or it will fail.
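Examples
--------
A hedged sketch of typical catalog matching (requires SciPy; output
skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord, match_coordinates_sky
>>> catalog = SkyCoord([10, 20, 30] * u.deg, [40, 50, 60] * u.deg)
>>> target = SkyCoord([10.1, 29.9] * u.deg, [40.2, 59.9] * u.deg)
>>> idx, sep2d, dist3d = match_coordinates_sky(target, catalog)  # doctest: +SKIP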
"""
if catalogcoord.isscalar or len(catalogcoord) < 1:
raise ValueError('The catalog for coordinate matching cannot be a '
'scalar or length-0.')
# send to catalog frame
newmatch = matchcoord.transform_to(catalogcoord)
# strip out distance info
match_urepr = newmatch.data.represent_as(UnitSphericalRepresentation)
newmatch_u = newmatch.realize_frame(match_urepr)
cat_urepr = catalogcoord.data.represent_as(UnitSphericalRepresentation)
newcat_u = catalogcoord.realize_frame(cat_urepr)
# Check for a stored KD-tree on the passed-in coordinate. Normally it will
# have a distinct name from the "3D" one, so it's safe to use even though
# it's based on UnitSphericalRepresentation.
storekdtree = catalogcoord.cache.get(storekdtree, storekdtree)
idx, sep2d, sep3d = match_coordinates_3d(newmatch_u, newcat_u, nthneighbor, storekdtree)
# sep3d is *wrong* above, because the distance information was removed,
# unless one of the catalogs doesn't have a real distance
if not (isinstance(catalogcoord.data, UnitSphericalRepresentation) or
isinstance(newmatch.data, UnitSphericalRepresentation)):
sep3d = catalogcoord[idx].separation_3d(newmatch)
# update the kdtree on the actual passed-in coordinate
if isinstance(storekdtree, str):
catalogcoord.cache[storekdtree] = newcat_u.cache[storekdtree]
elif storekdtree is True:
# the old backwards-compatible name
catalogcoord.cache['kdtree'] = newcat_u.cache['kdtree']
return idx, sep2d, sep3d
def search_around_3d(coords1, coords2, distlimit, storekdtree='kdtree_3d'):
"""
Searches for pairs of points that are within a specified distance of each
other in 3D space.
This is intended for use on coordinate objects with arrays of coordinates,
not scalars. For scalar coordinates, it is better to use the
``separation_3d`` methods.
Parameters
----------
coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The first set of coordinates, which will be searched for matches from
``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The second set of coordinates, which will be searched for matches from
``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
distlimit : `~astropy.units.Quantity` with distance units
The physical radius to search within.
storekdtree : bool or str, optional
If a string, will store the KD-Tree used in the search with the name
``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls
to this function. If False, the KD-Trees are not saved.
Returns
-------
idx1 : integer array
Indices into ``coords1`` that match the corresponding element of
``idx2``. Shape matches ``idx2``.
idx2 : integer array
Indices into ``coords2`` that match the corresponding element of
``idx1``. Shape matches ``idx1``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches ``idx1``
and ``idx2``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches ``idx1`` and
``idx2``. The unit is that of ``coords1``.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0)
to be installed or it will fail.
If you are using this function to search in a catalog for matches around
specific points, the convention is for ``coords2`` to be the catalog, and
``coords1`` are the points to search around. While these operations are
mathematically the same if ``coords1`` and ``coords2`` are flipped, some of
the optimizations may work better if this convention is obeyed.
In the current implementation, the return values are always sorted in the
same order as the ``coords1`` (so ``idx1`` is in ascending order). This is
considered an implementation detail, though, so it could change in a future
release.
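Examples
--------
A minimal sketch; both inputs must be arrays and carry distances
(requires SciPy; output skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord, search_around_3d
>>> c1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, distance=[1, 2] * u.kpc)
>>> c2 = SkyCoord([1.1, 5] * u.deg, [3.1, 6] * u.deg, distance=[1, 2] * u.kpc)
>>> idx1, idx2, sep2d, dist3d = search_around_3d(c1, c2, 0.2 * u.kpc)  # doctest: +SKIP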
"""
if not distlimit.isscalar:
raise ValueError('distlimit must be a scalar in search_around_3d')
if coords1.isscalar or coords2.isscalar:
raise ValueError('One of the inputs to search_around_3d is a scalar. '
'search_around_3d is intended for use with array '
'coordinates, not scalars. Instead, use '
'``coord1.separation_3d(coord2) < distlimit`` to find '
'the coordinates near a scalar coordinate.')
if len(coords1) == 0 or len(coords2) == 0:
# Empty array input: return empty match
return (np.array([], dtype=int), np.array([], dtype=int),
Angle([], u.deg),
u.Quantity([], coords1.distance.unit))
kdt2 = _get_cartesian_kdtree(coords2, storekdtree)
cunit = coords2.cartesian.x.unit
# we convert coord1 to match coord2's frame. We do it this way
# so that if the conversion does happen, the KD tree of coord2 at least gets
# saved. (by convention, coord2 is the "catalog" if that makes sense)
coords1 = coords1.transform_to(coords2)
kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit)
# convert the search distance to the unit used by the KD-Trees
d = distlimit.to_value(cunit)
idxs1 = []
idxs2 = []
for i, matches in enumerate(kdt1.query_ball_tree(kdt2, d)):
for match in matches:
idxs1.append(i)
idxs2.append(match)
idxs1 = np.array(idxs1, dtype=int)
idxs2 = np.array(idxs2, dtype=int)
if idxs1.size == 0:
d2ds = Angle([], u.deg)
d3ds = u.Quantity([], coords1.distance.unit)
else:
d2ds = coords1[idxs1].separation(coords2[idxs2])
d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
return idxs1, idxs2, d2ds, d3ds
def search_around_sky(coords1, coords2, seplimit, storekdtree='kdtree_sky'):
"""
Searches for pairs of points that have an angular separation no larger
than a specified angle.
This is intended for use on coordinate objects with arrays of coordinates,
not scalars. For scalar coordinates, it is better to use the ``separation``
methods.
Parameters
----------
coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The first set of coordinates, which will be searched for matches from
``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The second set of coordinates, which will be searched for matches from
``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
seplimit : `~astropy.units.Quantity` with angle units
The on-sky separation to search within.
storekdtree : bool or str, optional
If a string, will store the KD-Tree used in the search with the name
``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls
to this function. If False, the KD-Trees are not saved.
Returns
-------
idx1 : integer array
Indices into ``coords1`` that match the corresponding element of
``idx2``. Shape matches ``idx2``.
idx2 : integer array
Indices into ``coords2`` that match the corresponding element of
``idx1``. Shape matches ``idx1``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches ``idx1``
and ``idx2``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches ``idx1``
and ``idx2``; the unit is that of ``coords1``.
If either ``coords1`` or ``coords2`` don't have a distance,
this is the 3D distance on the unit sphere, rather than a
physical distance.
Notes
-----
This function requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0)
to be installed or it will fail.
In the current implementation, the return values are always sorted in the
same order as the ``coords1`` (so ``idx1`` is in ascending order). This is
considered an implementation detail, though, so it could change in a future
release.
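Examples
--------
A minimal sketch (requires SciPy; output skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord, search_around_sky
>>> c1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
>>> c2 = SkyCoord([1.05, 10] * u.deg, [3.05, 20] * u.deg)
>>> idx1, idx2, sep2d, dist3d = search_around_sky(c1, c2, 0.1 * u.deg)  # doctest: +SKIP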
"""
if not seplimit.isscalar:
raise ValueError('seplimit must be a scalar in search_around_sky')
if coords1.isscalar or coords2.isscalar:
raise ValueError('One of the inputs to search_around_sky is a scalar. '
'search_around_sky is intended for use with array '
'coordinates, not scalars. Instead, use '
'``coord1.separation(coord2) < seplimit`` to find the '
'coordinates near a scalar coordinate.')
if len(coords1) == 0 or len(coords2) == 0:
# Empty array input: return empty match
if coords2.distance.unit == u.dimensionless_unscaled:
distunit = u.dimensionless_unscaled
else:
distunit = coords1.distance.unit
return (np.array([], dtype=int), np.array([], dtype=int),
Angle([], u.deg),
u.Quantity([], distunit))
# we convert coord1 to match coord2's frame. We do it this way
# so that if the conversion does happen, the KD tree of coord2 at least gets
# saved. (by convention, coord2 is the "catalog" if that makes sense)
coords1 = coords1.transform_to(coords2)
# strip out distance info
urepr1 = coords1.data.represent_as(UnitSphericalRepresentation)
ucoords1 = coords1.realize_frame(urepr1)
kdt1 = _get_cartesian_kdtree(ucoords1, storekdtree)
if storekdtree and coords2.cache.get(storekdtree):
# just use the stored KD-Tree
kdt2 = coords2.cache[storekdtree]
else:
# strip out distance info
urepr2 = coords2.data.represent_as(UnitSphericalRepresentation)
ucoords2 = coords2.realize_frame(urepr2)
kdt2 = _get_cartesian_kdtree(ucoords2, storekdtree)
if storekdtree:
# save the KD-Tree in coords2, *not* ucoords2
coords2.cache['kdtree' if storekdtree is True else storekdtree] = kdt2
# this is the *cartesian* 3D distance that corresponds to the given angle
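# (a chord subtending an angle theta on the unit sphere has length
# 2*sin(theta/2))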
r = (2 * np.sin(Angle(seplimit) / 2.0)).value
idxs1 = []
idxs2 = []
for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)):
for match in matches:
idxs1.append(i)
idxs2.append(match)
idxs1 = np.array(idxs1, dtype=int)
idxs2 = np.array(idxs2, dtype=int)
if idxs1.size == 0:
if coords2.distance.unit == u.dimensionless_unscaled:
distunit = u.dimensionless_unscaled
else:
distunit = coords1.distance.unit
d2ds = Angle([], u.deg)
d3ds = u.Quantity([], distunit)
else:
d2ds = coords1[idxs1].separation(coords2[idxs2])
try:
d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
except ValueError:
# they don't have distances, so we just fall back on the cartesian
# distance, computed from d2ds
d3ds = 2 * np.sin(d2ds / 2.0)
return idxs1, idxs2, d2ds, d3ds
def _get_cartesian_kdtree(coord, attrname_or_kdt='kdtree', forceunit=None):
"""
This is a utility function to retrieve (and build/cache, if necessary)
a 3D cartesian KD-Tree from various sorts of astropy coordinate objects.
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinates to build the KD-Tree for.
attrname_or_kdt : bool or str or KDTree
If a string, will store the KD-Tree used for the computation in the
``coord``, in ``coord.cache`` with the provided name. If given as a
KD-Tree, it will just be used directly.
forceunit : unit or None
If a unit, the cartesian coordinates will convert to that unit before
being put in the KD-Tree. If None, whatever unit it's already in
will be used
Returns
-------
kdt : `~scipy.spatial.cKDTree` or `~scipy.spatial.KDTree`
The KD-Tree representing the 3D cartesian representation of the input
coordinates.
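Examples
--------
A hedged sketch of internal usage; the tree is cached in
``coord.cache['kdtree']`` (requires SciPy; output skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord
>>> from astropy.coordinates.matching import _get_cartesian_kdtree
>>> coord = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
>>> kdt = _get_cartesian_kdtree(coord)  # doctest: +SKIP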
"""
from warnings import warn
# without scipy this will immediately fail
from scipy import spatial
try:
KDTree = spatial.cKDTree
except Exception:
warn('C-based KD tree not found, falling back on (much slower) '
'python implementation')
KDTree = spatial.KDTree
if attrname_or_kdt is True: # backwards compatibility for pre v0.4
attrname_or_kdt = 'kdtree'
# figure out where any cached KDTree might be
if isinstance(attrname_or_kdt, str):
kdt = coord.cache.get(attrname_or_kdt, None)
if kdt is not None and not isinstance(kdt, KDTree):
raise TypeError('The `attrname_or_kdt` "{0}" is not a scipy KD tree!'.format(attrname_or_kdt))
elif isinstance(attrname_or_kdt, KDTree):
kdt = attrname_or_kdt
attrname_or_kdt = None
elif not attrname_or_kdt:
kdt = None
else:
raise TypeError('Invalid `attrname_or_kdt` argument for KD-Tree: ' +
str(attrname_or_kdt))
if kdt is None:
# need to build the cartesian KD-tree for the catalog
if forceunit is None:
cartxyz = coord.cartesian.xyz
else:
cartxyz = coord.cartesian.xyz.to(forceunit)
flatxyz = cartxyz.reshape((3, np.prod(cartxyz.shape) // 3))
try:
# Set compact_nodes=False, balanced_tree=False to use
# "sliding midpoint" rule, which is much faster than standard for
# many common use cases
kdt = KDTree(flatxyz.value.T, compact_nodes=False, balanced_tree=False)
except TypeError:
# Python implementation does not take compact_nodes and balanced_tree
# as arguments. However, it uses sliding midpoint rule by default
kdt = KDTree(flatxyz.value.T)
if attrname_or_kdt:
# cache the kdtree in `coord`
coord.cache[attrname_or_kdt] = kdt
return kdt
|
84481ed28a1739aaad65a55e37d15d49b3e3097af99d38da33b716344d691d0e |
import re
import copy
import warnings
import collections
import numpy as np
from .. import _erfa as erfa
from ..utils.compat.misc import override__dir__
from ..units import Unit, IrreducibleUnit
from .. import units as u
from ..constants import c as speed_of_light
from ..wcs.utils import skycoord_to_pixel, pixel_to_skycoord
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils.data_info import MixinInfo
from ..utils import ShapedLikeNDArray
from ..time import Time
from .distances import Distance
from .angles import Angle
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
GenericFrame, _get_repr_cls, _get_diff_cls,
_normalize_representation_type)
from .builtin_frames import ICRS, SkyOffsetFrame
from .representation import (BaseRepresentation, SphericalRepresentation,
UnitSphericalRepresentation, SphericalDifferential)
__all__ = ['SkyCoord', 'SkyCoordInfo']
PLUS_MINUS_RE = re.compile(r'(\+|\-)')
J_PREFIXED_RA_DEC_RE = re.compile(
r"""J # J prefix
([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits
([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits
""", re.VERBOSE)
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ['{0.' + compname + '.value:}' for compname
in repr_data.components]
return ','.join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ','.join(str(getattr(repr_data, comp).unit) or 'None'
for comp in repr_data.components)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if (issubclass(sc.representation_type, SphericalRepresentation) and
isinstance(sc.data, UnitSphericalRepresentation)):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type,
in_frame_units=True)
return repr_data
def _represent_as_dict(self):
obj = self._parent
attrs = (list(obj.representation_component_names) +
list(frame_transform_graph.frame_attributes.keys()))
# Don't output distance if it is all unitless 1.0
if 'distance' in attrs and np.all(obj.distance == 1.0):
attrs.remove('distance')
self._represent_as_dict_attrs = attrs
out = super()._represent_as_dict()
out['representation_type'] = obj.representation_type.get_name()
out['frame'] = obj.frame.name
# Note that obj.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], "icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
>>> c = SkyCoord(coords, FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", Galactic) # Units from string
>>> c = SkyCoord("galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults to
ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied ``LON`` and ``LAT`` values, respectively. If
only one unit is supplied then it applies to both ``LON`` and
``LAT``.
obstime : valid `~astropy.time.Time` initializer, optional
Time of observation
equinox : valid `~astropy.time.Time` initializer, optional
Coordinate frame equinox
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : valid `~astropy.coordinates.Angle` initializer, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity`, optional
Proper motion components, in angle per time units.
l, b : valid `~astropy.coordinates.Angle` initializer, optional
Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity`, optional
Proper motion components in the `Galactic` frame, in angle per time
units.
x, y, z : float or `~astropy.units.Quantity`, optional
Cartesian coordinate values
u, v, w : float or `~astropy.units.Quantity`, optional
Cartesian coordinate values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity`, optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
kwargs = self._parse_inputs(args, kwargs)
frame = kwargs['frame']
frame_attr_names = frame.get_frame_attr_names()
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
for attr in kwargs:
if (attr not in frame_attr_names and
attr in frame_transform_graph.frame_attributes):
# Setting it will also validate it.
setattr(self, attr, kwargs[attr])
coord_kwargs = {}
component_names = frame.representation_component_names
component_names.update(frame.get_representation_component_names('s'))
# TODO: deprecate representation, remove this in future
_normalize_representation_type(kwargs)
if 'representation_type' in kwargs:
coord_kwargs['representation_type'] = _get_repr_cls(
kwargs['representation_type'])
if 'differential_type' in kwargs:
coord_kwargs['differential_type'] = _get_diff_cls(kwargs['differential_type'])
for attr, value in kwargs.items():
if value is not None and (attr in component_names
or attr in frame_attr_names):
coord_kwargs[attr] = value
# Finally make the internal coordinate object.
self._sky_coord_frame = frame.__class__(copy=copy, **coord_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError('Cannot create a SkyCoord without data')
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: deprecate these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
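Examples
--------
A hedged sketch; shape-changing methods such as ``reshape`` delegate to
this method internally (output skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord
>>> sc = SkyCoord([1, 2, 3, 4] * u.deg, [5, 6, 7, 8] * u.deg)
>>> sc.reshape(2, 2).shape  # doctest: +SKIP
(2, 2)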
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method,
*args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, 'size', 1) > 1:
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, '_' + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
new.info = self.info
return new
def _parse_inputs(self, args, kwargs):
"""
Assemble a validated and sanitized keyword args dict for instantiating a
SkyCoord and coordinate object from the provided `args`, and `kwargs`.
"""
valid_kwargs = {}
# Put the SkyCoord attributes like frame, equinox, obstime, location
# into valid_kwargs dict. `Frame` could come from args or kwargs, so
# set valid_kwargs['frame'] accordingly. The others must be specified
# by keyword args or else get a None default. Pop them off of kwargs
# in the process.
frame = valid_kwargs['frame'] = _get_frame(args, kwargs)
# TODO: possibly remove the below. The representation/differential
# information should *already* be stored in the frame object, as it is
# extracted in _get_frame. So it may be redundant to include it below.
# TODO: deprecate representation, remove this in future
_normalize_representation_type(kwargs)
if 'representation_type' in kwargs:
valid_kwargs['representation_type'] = _get_repr_cls(
kwargs.pop('representation_type'))
if 'differential_type' in kwargs:
valid_kwargs['differential_type'] = _get_diff_cls(
kwargs.pop('differential_type'))
for attr in frame_transform_graph.frame_attributes:
if attr in kwargs:
valid_kwargs[attr] = kwargs.pop(attr)
# Get units
units = _get_representation_component_units(args, kwargs)
# Grab any frame-specific attr names like `ra` or `l` or `distance` from
# kwargs and migrate to valid_kwargs.
valid_kwargs.update(_get_representation_attrs(frame, units, kwargs))
# Error if anything is still left in kwargs
if kwargs:
# The next few lines add a more user-friendly error message to a
# common and confusing situation when the user specifies, e.g.,
# `pm_ra` when they really should be passing `pm_ra_cosdec`. The
# extra error should only turn on when the positional representation
# is spherical, and when the component 'pm_<lon>' is passed.
pm_message = ''
if frame.representation_type == SphericalRepresentation:
frame_names = list(frame.get_representation_component_names().keys())
lon_name = frame_names[0]
lat_name = frame_names[1]
if 'pm_{0}'.format(lon_name) in list(kwargs.keys()):
pm_message = ('\n\n By default, most frame classes expect '
'the longitudinal proper motion to include '
'the cos(latitude) term, named '
'`pm_{0}_cos{1}`. Did you mean to pass in '
'this component?'
.format(lon_name, lat_name))
raise ValueError('Unrecognized keyword argument(s) {0}{1}'
.format(', '.join("'{0}'".format(key)
for key in kwargs),
pm_message))
# Finally deal with the unnamed args. This figures out what the arg[0]
# is and returns a dict with appropriate key/values for initializing
# frame class. Note that differentials are *never* valid args, only
# kwargs. So they are not accounted for here (unless they're in a frame
# or SkyCoord object)
if args:
if len(args) == 1:
# One arg which must be a coordinate. In this case
# coord_kwargs will contain keys like 'ra', 'dec', 'distance'
# along with any frame attributes like equinox or obstime which
# were explicitly specified in the coordinate object (i.e. non-default).
coord_kwargs = _parse_coordinate_arg(args[0], frame, units, kwargs)
# Copy other 'info' attr only if it has actually been defined.
if 'info' in getattr(args[0], '__dict__', ()):
self.info = args[0].info
elif len(args) <= 3:
frame_attr_names = frame.representation_component_names.keys()
repr_attr_names = frame.representation_component_names.values()
coord_kwargs = {}
for arg, frame_attr_name, repr_attr_name, unit in zip(args, frame_attr_names,
repr_attr_names, units):
attr_class = frame.representation.attr_classes[repr_attr_name]
coord_kwargs[frame_attr_name] = attr_class(arg, unit=unit)
else:
raise ValueError('Must supply no more than three positional arguments, got {}'
.format(len(args)))
# Copy the coord_kwargs into the final valid_kwargs dict. For each
# of the coord_kwargs ensure that there is no conflict with a value
# specified by the user in the original kwargs.
for attr, coord_value in coord_kwargs.items():
if (attr in valid_kwargs
and valid_kwargs[attr] is not None
and np.any(valid_kwargs[attr] != coord_value)):
raise ValueError("Coordinate attribute '{0}'={1!r} conflicts with "
"keyword argument '{0}'={2!r}"
.format(attr, coord_value, valid_kwargs[attr]))
valid_kwargs[attr] = coord_value
return valid_kwargs
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which case the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
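Examples
--------
A minimal sketch (output skipped)::
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord
>>> c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, frame='icrs')
>>> c.transform_to('galactic')  # doctest: +SKIP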
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if (frame_val is not None and not
(merge_attributes and frame.is_frame_attr_default(attr))):
frame_kwargs[attr] = frame_val
elif (self_val is not None and
not self.is_frame_attr_default(attr)):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError('Transform `frame` must be a frame name, class, or instance')
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError('Cannot transform from {0} to {1}'
.format(self.frame.__class__, new_frame_cls))
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in (set(new_coord.get_frame_attr_names()) &
set(frame_kwargs.keys())):
frame_kwargs.pop(attr)
return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
at a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
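Examples
--------
A minimal sketch (values chosen for illustration; output skipped)::
>>> import astropy.units as u
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord
>>> c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg,
...              pm_ra_cosdec=1 * u.mas/u.yr, pm_dec=2 * u.mas/u.yr,
...              obstime=Time('J2000'))
>>> c.apply_space_motion(dt=10 * u.yr)  # doctest: +SKIP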
"""
if (new_obstime is None and dt is None or
new_obstime is not None and dt is not None):
raise ValueError("You must specify one of `new_obstime` or `dt`, "
"but not both.")
# Validate that we have velocity info
if 's' not in self.frame.data.differentials:
raise ValueError('SkyCoord requires velocity data to evolve the '
'position.')
if 'obstime' in self.frame.frame_attributes:
raise NotImplementedError("Updating the coordinates in a frame "
"with explicit time dependence is "
"currently not supported. If you would "
"like this functionality, please open an "
"issue on github:\n"
"https://github.com/astropy/astropy")
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# position / velocity were measured initially
raise ValueError('This object has no associated `obstime`. '
'apply_space_motion() must receive a time '
'difference, `dt`, and not a new obstime.')
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time('J2000')
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials['s']
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by starpm convention
plx = 0.
try:
rv = icrsvel.d_distance.to_value(u.km/u.s)
except u.UnitConversionError: # No RV
rv = 0.
starpm = erfa.starpm(icrsrep.lon.radian, icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian/u.yr),
icrsvel.d_lat.to_value(u.radian/u.yr),
plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2)
icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False),
distance=Distance(parallax=starpm[4] * u.arcsec, copy=False),
radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False),
differential_type=SphericalDifferential)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {attrnm: getattr(self, attrnm)
for attrnm in self._extra_frameattr_names}
frattrs['obstime'] = new_obstime
return self.__class__(icrs2, **frattrs).transform_to(self.frame)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the master transform graph.
"""
if '_sky_coord_frame' in self.__dict__:
if self.frame.name == attr:
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.get_frame_attr_names():
return getattr(self.frame, attr)
else:
return getattr(self, '_' + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError("'{0}' object has no attribute '{1}'"
.format(self.__class__.__name__, attr))
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if '_sky_coord_frame' in self.__dict__:
if self.frame.name == attr:
raise AttributeError("'{0}' is immutable".format(attr))
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError("'{0}' is immutable".format(attr))
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__('_' + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if '_sky_coord_frame' in self.__dict__:
if self.frame.name == attr:
raise AttributeError("'{0}' is immutable".format(attr))
if not attr.startswith('_') and hasattr(self._sky_coord_frame,
attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError("'{0}' is immutable".format(attr))
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__('_' + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
# determine the aliases that this can be transformed to.
dir_values = set()
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(set(attr for attr in dir(self.frame) if not attr.startswith('_')))
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return dir_values
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ': ' + frameattrs
data = self.frame._data_repr()
if data:
data = ': ' + data
return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())
def to_string(self, style='decimal', **kwargs):
"""
A string representation of the coordinates.
        The default style definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
            common ways to represent coordinates. The default is ``'decimal'``.
kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
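        Examples
        --------
        A short sketch; the output shown is indicative of the format only:
        >>> c = SkyCoord(10.5*u.deg, 20.25*u.deg)
        >>> c.to_string('hmsdms')  # doctest: +SKIP
        '00h42m00s +20d15m00s'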
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}},
'dms': {'lonargs': {'unit': u.degree},
'latargs': {'unit': u.degree}},
'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
'latargs': {'unit': u.degree, 'decimal': True}}
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]['lonargs'])
latargs.update(styles[style]['latargs'])
else:
raise ValueError('Invalid style. Valid options are: {0}'.format(",".join(styles)))
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (sph_coord.lon.to_string(**lonargs)
+ " " +
sph_coord.lat.to_string(**latargs))
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [(lonangle.to_string(**lonargs)
+ " " +
latangle.to_string(**latargs))]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
def is_equivalent_frame(self, other):
"""
        Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
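        Examples
        --------
        A couple of simple checks (both coordinates default to ICRS in the
        first case):
        >>> SkyCoord(0*u.deg, 0*u.deg).is_equivalent_frame(
        ...     SkyCoord(1*u.deg, 1*u.deg))
        True
        >>> SkyCoord(0*u.deg, 0*u.deg, frame='fk5').is_equivalent_frame(
        ...     SkyCoord(0*u.deg, 0*u.deg, frame='icrs'))
        False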
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if np.any(getattr(self, fattrnm) != getattr(other, fattrnm)):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't frame-like")
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
            unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
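        Examples
        --------
        A minimal sketch (the 1 degree offset makes the answer easy to check):
        >>> c1 = SkyCoord(0*u.deg, 0*u.deg)
        >>> c2 = SkyCoord(1*u.deg, 0*u.deg)
        >>> c1.separation(c2).degree  # doctest: +FLOAT_CMP
        1.0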
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
            If this or the other coordinate does not have a distance.
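        Examples
        --------
        A minimal sketch; distances are required on both coordinates:
        >>> c1 = SkyCoord(0*u.deg, 0*u.deg, distance=10*u.pc)
        >>> c2 = SkyCoord(0*u.deg, 0*u.deg, distance=11*u.pc)
        >>> c1.separation_3d(c2)  # doctest: +SKIP
        <Distance 1. pc>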
"""
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction (i.e., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction (i.e., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation : for the *total* angular offset (not broken out into components)
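        Examples
        --------
        A minimal sketch (both coordinates must share the same frame):
        >>> center = SkyCoord(10*u.deg, 20*u.deg)
        >>> target = SkyCoord(10.1*u.deg, 20.2*u.deg)
        >>> dra, ddec = center.spherical_offsets_to(target)  # doctest: +SKIP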
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!')
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
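        Examples
        --------
        A minimal sketch, assuming SciPy is available (the catalog values
        are arbitrary):
        >>> catalog = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[1, 2, 3]*u.deg)
        >>> c = SkyCoord(ra=[1.1, 2.1]*u.deg, dec=[1.1, 2.1]*u.deg)
        >>> idx, sep2d, dist3d = c.match_to_catalog_sky(catalog)  # doctest: +SKIP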
"""
from .matching import match_coordinates_sky
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_sky(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_sky')
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
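        Examples
        --------
        A minimal sketch, assuming SciPy is available; the distances are what
        make the match genuinely three dimensional:
        >>> catalog = SkyCoord(ra=[1, 2]*u.deg, dec=[1, 2]*u.deg,
        ...                    distance=[10, 20]*u.pc)
        >>> c = SkyCoord(ra=[1.1]*u.deg, dec=[1.1]*u.deg, distance=[10.5]*u.pc)
        >>> idx, sep2d, dist3d = c.match_to_catalog_3d(catalog)  # doctest: +SKIP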
"""
from .matching import match_coordinates_3d
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_3d(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_3d')
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` with angle units
The on-sky separation to search within.
Returns
-------
        idxsearcharound : integer array
            Indices into ``searcharoundcoords`` that match the corresponding
            elements of ``idxself``. Shape matches ``idxself``.
        idxself : integer array
            Indices into ``self`` that match the corresponding elements of
            ``idxsearcharound``. Shape matches ``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
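        Examples
        --------
        A minimal sketch, assuming SciPy is available (note that ``self``
        should be the array being searched):
        >>> coords = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[0, 0, 0]*u.deg)
        >>> centers = SkyCoord(ra=[2]*u.deg, dec=[0]*u.deg)
        >>> idxc, idxcoords, sep2d, dist3d = coords.search_around_sky(
        ...     centers, 1.5*u.deg)  # doctest: +SKIP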
"""
from .matching import search_around_sky
return search_around_sky(searcharoundcoords, self, seplimit,
storekdtree='_kdtree_sky')
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` with distance units
The physical radius to search within.
Returns
-------
        idxsearcharound : integer array
            Indices into ``searcharoundcoords`` that match the corresponding
            elements of ``idxself``. Shape matches ``idxself``.
        idxself : integer array
            Indices into ``self`` that match the corresponding elements of
            ``idxsearcharound``. Shape matches ``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
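        Examples
        --------
        A minimal sketch, assuming SciPy is available and that both objects
        carry distances:
        >>> coords = SkyCoord(ra=[1, 2]*u.deg, dec=[0, 0]*u.deg,
        ...                   distance=[10, 11]*u.pc)
        >>> centers = SkyCoord(ra=[1.5]*u.deg, dec=[0]*u.deg,
        ...                    distance=[10.5]*u.pc)
        >>> res = coords.search_around_3d(centers, 1*u.pc)  # doctest: +SKIP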
"""
from .matching import search_around_3d
return search_around_3d(searcharoundcoords, self, distlimit,
storekdtree='_kdtree_3d')
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get position_angle to another '
'SkyCoord or a coordinate frame with data')
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this `SkyCoord` (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object)
rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
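        Examples
        --------
        A minimal sketch (the coordinates are arbitrary):
        >>> center = SkyCoord(10*u.deg, 45*u.deg)
        >>> aframe = center.skyoffset_frame()
        >>> SkyCoord(11*u.deg, 46*u.deg).transform_to(aframe)  # doctest: +SKIP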
"""
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
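        Examples
        --------
        A quick sketch (the coordinates are roughly those of M42, which lies
        in Orion):
        >>> SkyCoord(83.8*u.deg, -5.4*u.deg).get_constellation()  # doctest: +SKIP
        'Orion'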
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
        # system fails in some cases. The workaround is to drop the velocities;
        # they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm)
for nm in self._extra_frameattr_names}
novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
**extra_frameattrs)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
#return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode='all'):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
            including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
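        Examples
        --------
        A minimal sketch using a bare TAN-projection WCS; a real WCS would
        normally also set ``crval``, ``crpix``, etc.:
        >>> from astropy.wcs import WCS
        >>> w = WCS(naxis=2)
        >>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        >>> xp, yp = SkyCoord(0.001*u.deg, 0.001*u.deg).to_pixel(w)  # doctest: +SKIP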
"""
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
            including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : an instance of this class
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def radial_velocity_correction(self, kind='barycentric', obstime=None,
location=None):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's Surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` with velocity units
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
sc = SkyCoord(1*u.deg, 2*u.deg)
            vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc)
rv = rv + vcorr + rv * vcorr / consts.c
If your target is nearby and/or has finite proper motion you may need to account
        for terms arising from this. See Wright & Eastman (2014) for details.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via ``with`` statement. For example, to use the JPL
ephemeris, do::
sc = SkyCoord(1*u.deg, 2*u.deg)
with coord.solar_system_ephemeris.set('jpl'):
                rv += sc.radial_velocity_correction(obstime=t, location=loc)
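        As a fuller, hedged sketch (the site name here is illustrative, and
        ``EarthLocation.of_site`` needs internet access on first use)::
            from astropy.time import Time
            from astropy.coordinates import EarthLocation
            loc = EarthLocation.of_site('greenwich')
            t = Time('2018-01-01T00:00:00')
            sc = SkyCoord(10*u.deg, 20*u.deg)
            vcorr = sc.radial_velocity_correction(obstime=t, location=loc)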
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel, get_body_barycentric
# location validation
timeloc = getattr(obstime, 'location', None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError('`location` cannot be in both the '
'passed-in `obstime` and this `SkyCoord` '
'because it is ambiguous which is meant '
'for the radial_velocity_correction.')
elif timeloc is not None:
location = timeloc
else:
raise TypeError('Must provide a `location` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute, as an attribute on '
'the passed in `obstime`, or in the method '
'call.')
elif self.location is not None or timeloc is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`location` argument is passed in and there is '
'also a `location` attribute on this SkyCoord or '
'the passed-in `obstime`.')
# obstime validation
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError('Must provide an `obstime` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute or in the method '
'call.')
elif self.obstime is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`obstime` argument is passed in and it is '
'inconsistent with the `obstime` frame '
'attribute on the SkyCoord')
pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
if kind == 'barycentric':
v_origin_to_earth = v_earth
elif kind == 'heliocentric':
v_sun = get_body_barycentric_posvel('sun', obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError("`kind` argument to radial_velocity_correction must "
"be 'barycentric' or 'heliocentric', but got "
"'{}'".format(kind))
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # Note: transforming to GCRS is not the correct thing to do here, since
        # we don't want to include aberration (or light deflection); instead,
        # only apply parallax if necessary.
if self.data.__class__ is UnitSphericalRepresentation:
targcart = self.icrs.cartesian
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
icrs_cart = self.icrs.cartesian
targcart = icrs_cart - obs_icrs_cart
targcart /= targcart.norm()
if kind == 'barycentric':
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + targcart.dot(beta_obs)) / (1 + gr/speed_of_light) - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
        names of the components of the requested frames, if they are also
followed by a non-alphanumeric character. It will also match columns
that *end* with the component name if a non-alphanumeric character is
*before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``'gall'`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
        are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : astropy.Table
The table to load data from.
coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : same as this class
The new `SkyCoord` (or subclass) object.
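        Examples
        --------
        A hedged sketch; the column names are hypothetical but follow the
        matching rules above::
            from astropy.table import Table
            t = Table(rows=[(10., 20.), (30., 40.)],
                      names=('RA[J2000]', 'DEC[J2000]'))
            sc = SkyCoord.guess_from_table(t, unit=u.deg)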
"""
        initial_frame = coord_kwargs.get('frame')
        frame = _get_frame([], coord_kwargs)
        coord_kwargs['frame'] = initial_frame
comp_kwargs = {}
for comp_name in frame.representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r'(\W|\b|_)'
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
# the final regex ORs together the two patterns
rex = re.compile('(' + starts_with_comp + ')|(' + ends_with_comp + ')',
re.IGNORECASE | re.UNICODE)
for col_name in table.colnames:
if rex.match(col_name):
if comp_name in comp_kwargs:
oldname = comp_kwargs[comp_name].name
msg = ('Found at least two matches for component "{0}"'
': "{1}" and "{2}". Cannot continue with this '
'ambiguity.')
raise ValueError(msg.format(comp_name, oldname, col_name))
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError('Found column "{0}" in table, but it was '
'already provided as "{1}" keyword to '
'guess_from_table function.'.format(v.name, k))
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame='icrs'):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
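        Examples
        --------
        A short sketch (requires internet access to query the resolver):
        >>> icrs = SkyCoord.from_name('M42')  # doctest: +SKIP
        >>> gal = SkyCoord.from_name('M42', frame='galactic')  # doctest: +SKIP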
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name)
icrs_sky_coord = cls(icrs_coord)
if frame in ('icrs', icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
# <----------------Private utility functions below here------------------------->
def _get_frame_class(frame):
"""
Get a frame class from the input `frame`, which could be a frame name
string, or frame class.
"""
import inspect
if isinstance(frame, str):
frame_names = frame_transform_graph.get_names()
if frame not in frame_names:
raise ValueError('Coordinate frame {0} not in allowed values {1}'
.format(frame, sorted(frame_names)))
frame_cls = frame_transform_graph.lookup_name(frame)
elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame):
frame_cls = frame
else:
raise ValueError('Coordinate frame must be a frame name or frame class')
return frame_cls
def _get_frame(args, kwargs):
"""
Determine the coordinate frame from input SkyCoord args and kwargs. This
modifies args and/or kwargs in-place to remove the item that provided
`frame`. It also infers the frame if an input coordinate was provided and
checks for conflicts.
This allows for frame to be specified as a string like 'icrs' or a frame
class like ICRS, but not an instance ICRS() since the latter could have
non-default representation attributes which would require a three-way merge.
"""
frame = kwargs.pop('frame', None)
if frame is None and len(args) > 1:
# We do not allow frames to be passed as positional arguments if data
# is passed separately from frame.
for arg in args:
if isinstance(arg, (SkyCoord, BaseCoordinateFrame)):
raise ValueError("{0} instance cannot be passed as a positional "
"argument for the frame, pass it using the "
"frame= keyword instead.".format(arg.__class__.__name__))
# If the frame is an instance or SkyCoord, we split up the attributes and
# make it into a class.
if isinstance(frame, SkyCoord):
# Copy any extra attributes if they are not explicitly given.
for attr in frame._extra_frameattr_names:
kwargs.setdefault(attr, getattr(frame, attr))
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
for attr in frame.get_frame_attr_names():
if attr in kwargs:
raise ValueError("cannot specify frame attribute '{0}' directly in SkyCoord since a frame instance was passed in".format(attr))
else:
kwargs[attr] = getattr(frame, attr)
frame = frame.__class__
if frame is not None:
# Frame was provided as kwarg so validate and coerce into corresponding frame.
frame_cls = _get_frame_class(frame)
frame_specified_explicitly = True
else:
# Look for the frame in args
for arg in args:
try:
frame_cls = _get_frame_class(arg)
frame_specified_explicitly = True
except ValueError:
pass
else:
args.remove(arg)
warnings.warn("Passing a frame as a positional argument is now "
"deprecated, use the frame= keyword argument "
"instead.", AstropyDeprecationWarning)
break
else:
# Not in args nor kwargs - default to icrs
frame_cls = ICRS
frame_specified_explicitly = False
# Check that the new frame doesn't conflict with existing coordinate frame
# if a coordinate is supplied in the args list. If the frame still had not
# been set by this point and a coordinate was supplied, then use that frame.
for arg in args:
# this catches the "single list passed in" case. For that case we want
# to allow the first argument to set the class. That's OK because
# _parse_coordinate_arg goes and checks that the frames match between
# the first and all the others
if (isinstance(arg, (collections.Sequence, np.ndarray)) and
len(args) == 1 and len(arg) > 0):
arg = arg[0]
coord_frame_obj = coord_frame_cls = None
if isinstance(arg, BaseCoordinateFrame):
coord_frame_obj = arg
elif isinstance(arg, SkyCoord):
coord_frame_obj = arg.frame
if coord_frame_obj is not None:
coord_frame_cls = coord_frame_obj.__class__
frame_diff = coord_frame_obj.get_representation_cls('s')
if frame_diff is not None:
# we do this check because otherwise if there's no default
# differential (i.e. it is None), the code below chokes. but
# None still gets through if the user *requests* it
kwargs.setdefault('differential_type', frame_diff)
if coord_frame_cls is not None:
if not frame_specified_explicitly:
frame_cls = coord_frame_cls
elif frame_cls is not coord_frame_cls:
raise ValueError("Cannot override frame='{0}' of input coordinate with "
"new frame='{1}'. Instead transform the coordinate."
.format(coord_frame_cls.__name__, frame_cls.__name__))
frame_cls_kwargs = {}
# TODO: deprecate representation, remove this in future
_normalize_representation_type(kwargs)
if 'representation_type' in kwargs:
frame_cls_kwargs['representation_type'] = _get_repr_cls(
kwargs['representation_type'])
if 'differential_type' in kwargs:
frame_cls_kwargs['differential_type'] = _get_diff_cls(
kwargs['differential_type'])
return frame_cls(**frame_cls_kwargs)
def _get_representation_component_units(args, kwargs):
"""
Get the unit from kwargs for the *representation* components (not the
differentials).
"""
if 'unit' not in kwargs:
units = [None, None, None]
else:
units = kwargs.pop('unit')
if isinstance(units, str):
units = [x.strip() for x in units.split(',')]
# Allow for input like unit='deg' or unit='m'
if len(units) == 1:
units = [units[0], units[0], units[0]]
elif isinstance(units, (Unit, IrreducibleUnit)):
units = [units, units, units]
try:
units = [(Unit(x) if x else None) for x in units]
units.extend(None for x in range(3 - len(units)))
if len(units) > 3:
raise ValueError()
except Exception:
raise ValueError('Unit keyword must have one to three unit values as '
'tuple or comma-separated string')
return units
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
is_scalar = False # Differentiate between scalar and list input
valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError('Cannot initialize from a frame without coordinate data')
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (isinstance(coords.data, UnitSphericalRepresentation) and
repr_attr_name == 'distance'):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and 's' in coords.data.differentials:
orig_vel = coords.data.differentials['s']
vel = coords.data.represent_as(frame.representation, frame.get_representation_cls('s')).differentials['s']
for frname, reprname in frame.get_representation_component_names('s').items():
if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and
'unit' in orig_vel.get_name()):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (isinstance(coords, SkyCoord)
or attr not in coords._attr_names_with_defaults)
if use_value and value is not None:
valid_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and 's' in coords.differentials:
diffs = frame.get_representation_cls('s')
data = coords.represent_as(frame.representation_type, diffs)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
for frname, reprname in frame.get_representation_component_names('s').items():
values.append(getattr(data.differentials['s'], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if'
and coords.ndim == 2 and coords.shape[1] <= 3):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (collections.Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = ('ra' in frame.representation_component_names and
'dec' in frame.representation_component_names)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("List of inputs don't have equivalent "
"frames: {0} != {1}".format(sc, scs[0]))
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
valid_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
valid_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
if allunitsphrepr and repr_attr_name == 'distance':
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted(set(len(x) for x in vals))
except Exception:
raise ValueError('One or more elements of input sequence does not have a length')
if len(n_coords) > 1:
raise ValueError('Input coordinate values must have same number of elements, found {0}'
.format(n_coords))
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError('Input coordinates have {0} values but '
'representation {1} only accepts {2}'
.format(n_coords,
frame.representation_type.get_name(),
n_attr_names))
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
# (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
            # because Longitude et al. distinguish list from tuple, so [a1, a2, ..]
            # is needed while (a1, a2, ..) doesn't work.)
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError('Cannot parse coordinates from first argument')
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units):
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit,
copy=False)
except Exception as err:
raise ValueError('Cannot parse first argument data "{0}" for attribute '
'{1}'.format(value, frame_attr_name), err)
return valid_kwargs
def _get_representation_attrs(frame, units, kwargs):
"""
Find instances of the "representation attributes" for specifying data
for this frame. Pop them off of kwargs, run through the appropriate class
constructor (to validate and apply unit), and put into the output
valid_kwargs. "Representation attributes" are the frame-specific aliases
for the underlying data values in the representation, e.g. "ra" for "lon"
for many equatorial spherical representations, or "w" for "x" in the
cartesian representation of Galactic.
This also gets any *differential* kwargs, because they go into the same
frame initializer later on.
"""
frame_attr_names = frame.representation_component_names.keys()
repr_attr_classes = frame.representation_type.attr_classes.values()
valid_kwargs = {}
for frame_attr_name, repr_attr_class, unit in zip(frame_attr_names, repr_attr_classes, units):
value = kwargs.pop(frame_attr_name, None)
if value is not None:
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)
# also check the differentials. They aren't included in the units keyword,
# so we only look for the names.
differential_type = frame.differential_type
if differential_type is not None:
for frame_name, repr_name in frame.get_representation_component_names('s').items():
diff_attr_class = differential_type.attr_classes[repr_name]
value = kwargs.pop(frame_name, None)
if value is not None:
valid_kwargs[frame_name] = diff_attr_class(value)
return valid_kwargs
def _parse_ra_dec(coord_str):
"""
Parse RA and Dec values from a coordinate string. Currently the
following formats are supported:
* space separated 6-value format
    * space separated format with fewer than 6 values; this requires a plus
      or minus sign separation between RA and Dec
* sign separated format
* JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
* JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
Parameters
----------
coord_str : str
Coordinate string to parse.
Returns
-------
coord : str or list of str
Parsed coordinate values.
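    Examples
    --------
    A few indicative parses (the inputs are illustrative, and the expected
    results are shown as comments)::
        _parse_ra_dec('1 2 3 +4 5 6')         # -> ('1 2 3', '+4 5 6')
        _parse_ra_dec('J012345.67+123456.7')  # -> ('01 23 45.67', '+12 34 56.7')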
"""
if isinstance(coord_str, str):
coord1 = coord_str.split()
else:
# This exception should never be raised from SkyCoord
raise TypeError('coord_str must be a single str')
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif len(coord1) > 2:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
elif len(coord1) == 1:
match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)
if match_j:
coord = match_j.groups()
if len(coord[0].split('.')[0]) == 7:
coord = ('{0} {1} {2}'.
format(coord[0][0:3], coord[0][3:5], coord[0][5:]),
'{0} {1} {2}'.
format(coord[1][0:3], coord[1][3:5], coord[1][5:]))
else:
coord = ('{0} {1} {2}'.
format(coord[0][0:2], coord[0][2:4], coord[0][4:]),
'{0} {1} {2}'.
format(coord[1][0:3], coord[1][3:5], coord[1][5:]))
else:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
else:
coord = coord1
return coord
37eccbcb85ebff5cb7cd7537af71fab4ee870b946c6ede9094637d7ca1802258
"""
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import operator
from collections import OrderedDict
import inspect
import warnings
import numpy as np
import astropy.units as u
from .angles import Angle, Longitude, Latitude
from .distances import Distance
from .._erfa import ufunc as erfa_ufunc
from ..utils import ShapedLikeNDArray, classproperty
from ..utils import deprecated_attribute
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils.misc import InheritDocstrings
from ..utils.compat import NUMPY_LT_1_14
__all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation",
"CartesianRepresentation", "SphericalRepresentation",
"UnitSphericalRepresentation", "RadialRepresentation",
"PhysicsSphericalRepresentation", "CylindricalRepresentation",
"BaseDifferential", "CartesianDifferential",
"BaseSphericalDifferential", "BaseSphericalCosLatDifferential",
"SphericalDifferential", "SphericalCosLatDifferential",
"UnitSphericalDifferential", "UnitSphericalCosLatDifferential",
"RadialDifferential", "CylindricalDifferential",
"PhysicsSphericalDifferential"]
# Module-level dict mapping representation string alias names to classes.
# This is populated by the metaclass init so all representation and differential
# classes get registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# recommended_units deprecation message; if the attribute is removed later,
# also remove its use in BaseFrame._get_representation_info.
_recommended_units_deprecation = """
The 'recommended_units' attribute is deprecated since 3.0 and may be removed
in a future version. Its main use, of representing angles in degrees in frames,
is now done automatically in frames. Further overrides are discouraged but can
be done using a frame's ``frame_specific_representation_info``.
"""
def _array2string(values, prefix=''):
# Work around version differences for array2string.
kwargs = {'separator': ', ', 'prefix': prefix}
kwargs['formatter'] = {}
if NUMPY_LT_1_14: # in 1.14, style is no longer used (and deprecated)
kwargs['style'] = repr
return np.array2string(values, **kwargs)
def _combine_xyz(x, y, z, xyz_axis=0):
"""
Combine components ``x``, ``y``, ``z`` into a single Quantity array.
Parameters
----------
x, y, z : `~astropy.units.Quantity`
The individual x, y, and z components.
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``, i.e., using the default of ``0``,
the shape will be ``(3,) + x.shape``.
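    Examples
    --------
    A quick sketch with scalar inputs (the output repr is indicative):
    >>> import astropy.units as u
    >>> _combine_xyz(1*u.km, 2*u.km, 3*u.km)  # doctest: +SKIP
    <Quantity [1., 2., 3.] km>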
"""
# Get x, y, z to the same units (this is very fast for identical units)
# since np.stack cannot deal with quantity.
cls = x.__class__
unit = x.unit
x = x.value
y = y.to_value(unit)
z = z.to_value(unit)
xyz = np.stack([x, y, z], axis=xyz_axis)
return cls(xyz, unit=unit, copy=False)
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
attrs = []
for component in components:
try:
attrs.append(args.pop(0) if args else kwargs.pop(component))
except KeyError:
raise TypeError('__init__() missing 1 required positional '
'argument: {0!r}'.format(component))
copy = args.pop(0) if args else kwargs.pop('copy', True)
if args:
raise TypeError('unexpected arguments: {0}'.format(args))
if kwargs:
for component in components:
if component in kwargs:
raise TypeError("__init__() got multiple values for "
"argument {0!r}".format(component))
raise TypeError('unexpected keyword arguments: {0}'.format(kwargs))
# Pass attributes through the required initializing classes.
attrs = [self.attr_classes[component](attr, copy=copy)
for component, attr in zip(components, attrs)]
try:
attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError:
if len(components) <= 2:
c_str = ' and '.join(components)
else:
c_str = ', '.join(components[:2]) + ', and ' + components[2]
raise ValueError("Input parameters {0} cannot be broadcast"
.format(c_str))
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, '_' + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
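        Examples
        --------
        For example:
        >>> from astropy.coordinates import CartesianRepresentation
        >>> CartesianRepresentation.get_name()
        'cartesian'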
"""
name = cls.__name__.lower()
if name.endswith('representation'):
name = name[:-14]
elif name.endswith('differential'):
name = name[:-12]
return name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : object of this class
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Returns
-------
cartrepr : `CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
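        Examples
        --------
        A sketch of how ``reshape`` is routed through ``_apply`` (the call
        below is equivalent to ``rep._apply('reshape', 6)``; output shown
        for illustration only):
        >>> import numpy as np, astropy.units as u
        >>> rep = CartesianRepresentation(np.zeros((2, 3)) * u.m,
        ...                               np.zeros((2, 3)) * u.m,
        ...                               np.zeros((2, 3)) * u.m)
        >>> rep.reshape(6).shape  # doctest: +SKIP
        (6,)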
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, '_' + component,
apply_method(getattr(self, component)))
return new
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __div__(self, other): # pragma: py2
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return dict([(component, getattr(self, component).unit)
for component in self.components])
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = '({0})'.format(
', '.join([self._units[component].to_string()
for component in self.components]))
return unitstr
def __str__(self):
return '{0} {1:s}'.format(_array2string(self._values), self._unitstr)
def __repr__(self):
prefixstr = ' '
arrstr = _array2string(self._values, prefix=prefixstr)
diffstr = ''
if getattr(self, 'differentials', None):
diffstr = '\n (has differentials w.r.t.: {0})'.format(
', '.join([repr(key) for key in self.differentials.keys()]))
unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]'
return '<{0} ({1}) {2:s}\n{3}{4}{5}>'.format(
self.__class__.__name__, ', '.join(self.components),
unitstr, prefixstr, arrstr, diffstr)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = '_' + component
def get_component(self):
return getattr(self, component)
return get_component
# Need to also subclass ABCMeta rather than type, so that this meta class can
# be combined with a ShapedLikeNDArray subclass (which is an ABC). Without it:
# "TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases"
class MetaBaseRepresentation(InheritDocstrings, abc.ABCMeta):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Register representation name (except for BaseRepresentation)
if cls.__name__ == 'BaseRepresentation':
return
if 'attr_classes' not in dct:
raise NotImplementedError('Representations must have an '
'"attr_classes" class attribute.')
if 'recommended_units' in dct:
warnings.warn(_recommended_units_deprecation,
AstropyDeprecationWarning)
# Ensure we don't override the property that warns about the
# deprecation, but that the value remains the same.
dct.setdefault('_recommended_units', dct.pop('recommended_units'))
repr_name = cls.get_name()
if repr_name in REPRESENTATION_CLASSES:
raise ValueError("Representation class {0} already defined"
.format(repr_name))
REPRESENTATION_CLASSES[repr_name] = cls
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=("The '{0}' component of the points(s)."
.format(component))))
class BaseRepresentation(BaseRepresentationOrDifferential,
metaclass=MetaBaseRepresentation):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, an `~collections.OrderedDict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
recommended_units = deprecated_attribute('recommended_units', since='3.0')
_recommended_units = {}
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if (isinstance(differentials, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
raise ValueError("To attach a RadialDifferential to a "
"UnitSphericalRepresentation, you must supply "
"a dictionary with an appropriate key.")
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError:
raise TypeError("'differentials' argument must be a "
"dictionary-like object")
diff._check_base(self)
if (isinstance(diff, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError("For differential object '{0}', expected "
"unit key = '{1}' but received key = '{2}'"
.format(repr(diff), expected_key, key))
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
                # so use a ValueError instead?
raise ValueError("Shape of differentials must be the same "
"as the shape of the representation ({0} vs "
"{1})".format(diff.shape, self.shape))
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError("Operation '{0}' is not supported when "
"differentials are attached to a {1}."
.format(op_name, self.__class__.__name__))
@property
def _compatible_differentials(self):
return [DIFFERENTIAL_CLASSES[self.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError("{} has not implemented unit vectors"
.format(type(self)))
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError("{} has not implemented scale factors."
.format(type(self)))
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this "
"representation!")
elif (len(self.differentials) == 1 and
inspect.isclass(differential_class) and
issubclass(differential_class, BaseDifferential)):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
elif set(differential_class.keys()) != set(self.differentials.keys()):
ValueError("Desired differential classes must be passed in "
"as a dictionary with keys equal to a string "
"representation of the unit of the derivative "
"for each differential stored with this "
"representation object ({0})"
.format(self.differentials))
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k],
base=self)
except Exception:
if (differential_class[k] not in
new_rep._compatible_differentials):
raise TypeError("Desired differential class {0} is not "
"compatible with the desired "
"representation class {1}"
.format(differential_class[k],
new_rep.__class__))
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
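        Examples
        --------
        A sketch (output indicative only):
        >>> import astropy.units as u
        >>> from astropy.coordinates import (CartesianRepresentation,
        ...                                  SphericalRepresentation)
        >>> rep = CartesianRepresentation(1 * u.kpc, 0 * u.kpc, 0 * u.kpc)
        >>> rep.represent_as(SphericalRepresentation)  # doctest: +SKIP
        <SphericalRepresentation (lon, lat, distance) in (rad, rad, kpc)
            (0., 0., 1.)>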
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError("Input to a representation's represent_as "
"must be a class, not a string. For "
"strings, use frame objects")
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class)
return new_rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : Sequence of `~astropy.coordinates.BaseDifferential`
The differentials for the new representation to have.
Returns
-------
newrepr
A copy of this representation, but with the ``differentials`` as
its differentials.
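        Examples
        --------
        A sketch attaching a velocity differential (the ``'s'`` key below is
        inferred from the units; output not tested):
        >>> import astropy.units as u
        >>> rep = CartesianRepresentation(1 * u.pc, 2 * u.pc, 3 * u.pc)
        >>> dif = CartesianDifferential(1 * u.km / u.s, 2 * u.km / u.s,
        ...                             3 * u.km / u.s)
        >>> list(rep.with_differentials(dif).differentials)  # doctest: +SKIP
        ['s']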
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(*args, differentials=self.differentials.copy(),
copy=False)
new_rep._differentials.update(
new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
newrepr
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
"""
return representation.represent_as(cls)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = dict(
[(k, diff._apply(method, *args, **kwargs))
for k, diff in self._differentials.items()])
return rep
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
self._raise_if_has_differentials(op.__name__)
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
return self.__class__(*results)
except Exception:
return NotImplemented
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
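        Examples
        --------
        For instance, for a cylindrical point the angular ``phi`` component
        is skipped, so the norm is ``sqrt(rho**2 + z**2)`` (a sketch; output
        indicative only):
        >>> import astropy.units as u
        >>> CylindricalRepresentation(3 * u.km, 45 * u.deg,
        ...                           4 * u.km).norm()  # doctest: +SKIP
        <Quantity 5. km>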
"""
return np.sqrt(functools.reduce(
operator.add, (getattr(self, component)**2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle))))
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : representation
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials('mean')
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : representation
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials('sum')
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : representation
The representation to take the cross product with.
Returns
-------
cross_product : representation
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials('cross')
return self.from_cartesian(self.to_cartesian().cross(other))
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
        have different shapes, they should be broadcastable. If they are not
        quantities, ``unit`` should be set. If only ``x`` is given, it is
        assumed to contain an array with the 3 coordinates stored along
        ``xyz_axis``.
unit : `~astropy.units.Unit` or str
        If given, the coordinates will be converted to this unit (or taken to
        be in this unit if not already so).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`CartesianDifferential` instance, or a dictionary of
`CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('x', u.Quantity),
('y', u.Quantity),
('z', u.Quantity)])
_xyz = None
def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None,
differentials=None, copy=True):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if x, y, and z are "
"in a single array passed in through x, "
"i.e., y and z should not be not given.")
if y is None or z is None:
raise ValueError("x, y, and z are required to instantiate {0}"
.format(self.__class__.__name__))
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (self._x.unit.is_equivalent(self._y.unit) and
self._x.unit.is_equivalent(self._z.unit)):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
o = np.broadcast_to(0.*u.one, self.shape, subok=True)
return OrderedDict(
(('x', CartesianRepresentation(l, o, o, copy=False)),
('y', CartesianRepresentation(o, l, o, copy=False)),
('z', CartesianRepresentation(o, o, l, copy=False))))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('x', l), ('y', l), ('z', l)))
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
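        Examples
        --------
        A sketch (output indicative only):
        >>> import astropy.units as u
        >>> rep = CartesianRepresentation(1 * u.m, 2 * u.m, 3 * u.m)
        >>> rep.get_xyz()  # doctest: +SKIP
        <Quantity [1., 2., 3.] m>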
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return _combine_xyz(self._x, self._y, self._z, xyz_axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : `~numpy.ndarray`
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# Handle differentials attached to this representation
if self.differentials:
# TODO: speed this up going via d.d_xyz.
new_diffs = dict(
(k, d.from_cartesian(d.to_cartesian().transform(matrix)))
for k, d in self.differentials.items())
else:
new_diffs = None
return self.__class__(p, xyz_axis=-1, copy=False, differentials=new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = ((self, other_c) if not reverse else
(other_c, self))
return self.__class__(*(op(getattr(first, component),
getattr(second, component))
for component in first.components))
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._apply('mean', *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._apply('sum', *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : representation
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
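        Examples
        --------
        Orthogonal vectors have zero dot product (a sketch; output
        indicative only):
        >>> import astropy.units as u
        >>> a = CartesianRepresentation(1 * u.m, 0 * u.m, 0 * u.m)
        >>> b = CartesianRepresentation(0 * u.m, 1 * u.m, 0 * u.m)
        >>> a.dot(b)  # doctest: +SKIP
        <Quantity 0. m2>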
"""
try:
other_c = other.to_cartesian()
except Exception:
raise TypeError("cannot only take dot product with another "
"representation, not a {0} instance."
.format(type(other)))
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1),
other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : representation
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
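        Examples
        --------
        The cross product of unit vectors along x and y points along z
        (a sketch; output indicative only):
        >>> import astropy.units as u
        >>> a = CartesianRepresentation(1 * u.m, 0 * u.m, 0 * u.m)
        >>> b = CartesianRepresentation(0 * u.m, 1 * u.m, 0 * u.m)
        >>> a.cross(b).z  # doctest: +SKIP
        <Quantity 1. m2>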
"""
self._raise_if_has_differentials('cross')
try:
other_c = other.to_cartesian()
except Exception:
raise TypeError("cannot only take cross product with another "
"representation, not a {0} instance."
.format(type(other)))
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1),
other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude)])
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@property
def _compatible_differentials(self):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return OrderedDict(
(('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False))))
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1./u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return OrderedDict((('lon', sf_lon),
('lat', sf_lat)))
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
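        Examples
        --------
        A point at ``lon=90 deg, lat=0 deg`` maps onto the unit vector along
        y (a sketch; output not tested):
        >>> import astropy.units as u
        >>> usph = UnitSphericalRepresentation(90 * u.deg, 0 * u.deg)
        >>> usph.to_cartesian().y  # doctest: +SKIP
        <Quantity 1.>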
"""
        # For numpy < 1.16, the erfa ufunc cannot create the (3,)-vector
        # output automatically, so pre-allocate it here.
p = u.Quantity(np.empty(self.shape + (3,)), u.dimensionless_unscaled,
copy=False)
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat, p)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat, r=1.0,
copy=False)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0,
copy=False)
return super().represent_as(other_class, differential_class)
def __mul__(self, other):
self._raise_if_has_differentials('multiplication')
return self._dimensional_representation(lon=self.lon, lat=self.lat,
distance=1. * other)
def __truediv__(self, other):
self._raise_if_has_differentials('division')
return self._dimensional_representation(lon=self.lon, lat=self.lat,
distance=1. / other)
def __neg__(self):
self._raise_if_has_differentials('negation')
return self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity`
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs))
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : representation
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other))
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
    It can do little else than serve as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity`
The distance of the point(s) from the origin.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('distance', u.Quantity)])
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, copy=copy, differentials=differentials)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError('Cartesian unit vectors are undefined for '
'{0} instances'.format(self.__class__))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('distance', l),))
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError('cannot convert {0} instance to cartesian.'
.format(self.__class__))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def _scale_operation(self, op, *args):
self._raise_if_has_differentials(op.__name__)
return op(self.distance, *args)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity`
            The distance itself, with the same shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity`
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('distance', u.Quantity)])
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat, distance, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy,
differentials=differentials)
if self._distance.unit.physical_type == 'length':
self._distance = self._distance.view(Distance)
@property
def _compatible_differentials(self):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return OrderedDict(
(('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False)),
('distance', CartesianRepresentation(coslat*coslon, coslat*sinlon,
sinlat, copy=False))))
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('lon', sf_lon),
('lat', sf_lat),
('distance', sf_distance)))
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
r=self.distance, copy=False)
elif issubclass(other_class, UnitSphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, copy=False)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
        # For numpy < 1.16, the erfa ufunc cannot create the (3,)-vector
        # output automatically, so pre-allocate it here.
p = u.Quantity(np.empty(self.shape + (3,)), d.unit, copy=False)
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d, p)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`. If ``copy`` is False,
        ``phi`` will be changed in place if it is not between 0 and 360
        degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('phi', Angle),
('theta', Angle),
('r', u.Quantity)])
def __init__(self, phi, theta, r, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {0}'.format(theta.to(u.degree)))
if self._r.unit.physical_type == 'length':
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
        The inclination (angle from the pole) of the point(s).
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return OrderedDict(
(('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)),
('theta', CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False)),
('r', CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
costheta, copy=False))))
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('phi', r * sintheta),
('theta', r),
('r', l)))
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
distance=self.r)
elif issubclass(other_class, UnitSphericalRepresentation):
return other_class(lon=self.phi, lat=90 * u.deg - self.theta)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
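        Examples
        --------
        A point on the polar axis has ``theta=0`` (a sketch; output not
        tested):
        >>> import astropy.units as u
        >>> cart = CartesianRepresentation(0 * u.kpc, 0 * u.kpc, 1 * u.kpc)
        >>> PhysicsSphericalRepresentation.from_cartesian(cart)  # doctest: +SKIP
        <PhysicsSphericalRepresentation (phi, theta, r) in (rad, rad, kpc)
            (0., 0., 1.)>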
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
        to an angle between 0 and 360 degrees. This can also be an instance
        of `~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
        The z coordinate(s) of the point(s).
differentials : dict, `CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('rho', u.Quantity),
('phi', Angle),
('z', u.Quantity)])
def __init__(self, rho, phi, z, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1., self.shape)
return OrderedDict(
(('rho', CartesianRepresentation(cosphi, sinphi, 0, copy=False)),
('phi', CartesianRepresentation(-sinphi, cosphi, 0, copy=False)),
('z', CartesianRepresentation(0, 0, l, unit=u.one, copy=False))))
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('rho', l),
('phi', rho),
('z', l)))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
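        Examples
        --------
        ``rho`` is the in-plane distance ``hypot(x, y)`` (a sketch; output
        not tested):
        >>> import astropy.units as u
        >>> cart = CartesianRepresentation(3 * u.m, 4 * u.m, 5 * u.m)
        >>> CylindricalRepresentation.from_cartesian(cart).rho  # doctest: +SKIP
        <Quantity 5. m>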
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
class MetaBaseDifferential(InheritDocstrings, abc.ABCMeta):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Don't do anything for base helper classes.
if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential',
'BaseSphericalCosLatDifferential'):
return
if 'base_representation' not in dct:
            raise NotImplementedError('Differential representations must have a '
'"base_representation" class attribute.')
# If not defined explicitly, create attr_classes.
if not hasattr(cls, 'attr_classes'):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = OrderedDict([('d_' + c, u.Quantity)
for c in base_attr_classes])
if 'recommended_units' in dct:
warnings.warn(_recommended_units_deprecation,
AstropyDeprecationWarning)
# Ensure we don't override the property that warns about the
# deprecation, but that the value remains the same.
dct.setdefault('_recommended_units', dct.pop('recommended_units'))
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError("Differential class {0} already defined"
.format(repr_name))
DIFFERENTIAL_CLASSES[repr_name] = cls
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=("Component '{0}' of the Differential."
.format(component))))
class BaseDifferential(BaseRepresentationOrDifferential,
metaclass=MetaBaseDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
Notes
-----
All differential representation classes should subclass this base class,
    and define a ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
"""
recommended_units = deprecated_attribute('recommended_units', since='3.0')
_recommended_units = {}
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError("Differential class {0} is not compatible with the "
"base (representation) class {1}"
.format(cls, base.__class__))
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
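        Examples
        --------
        A sketch of the expected behavior: proper motions in ``mas/yr`` on
        angular components reduce to a time derivative, so the key is
        ``'s'`` (output not tested):
        >>> import astropy.units as u
        >>> base = SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
        >>> dif = SphericalDifferential(1 * u.mas / u.yr, 2 * u.mas / u.yr,
        ...                             3 * u.km / u.s)
        >>> dif._get_deriv_key(base)  # doctest: +SKIP
        's'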
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, 'd_{0}'.format(name), None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# Get the si unit without a scale by going via Quantity;
# `.si` causes the scale to be included in the value.
return str(u.Quantity(1., d_unit).si.unit)
else:
raise RuntimeError("Invalid representation-differential match! Not "
"sure how we got into this state.")
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
        `CartesianRepresentation`
            This object in cartesian form; its components carry this
            differential's units.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)))
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
        other : `CartesianRepresentation`
            The object, in 3D rectangular cartesian form, to convert into
            this differential.
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
A new differential object that is this class' type.
"""
base_e, base_sf = cls._get_base_vectors(base)
return cls(*(other.dot(e / base_sf[component])
for component, e in base_e.items()), copy=False)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
        base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
base = base.represent_as(other_class.base_representation)
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation))
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def _scale_operation(self, op, *args):
"""Scale all components.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(*[op(getattr(first, c), getattr(second, c))
for c in self.components])
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but cartesian differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
        If given, the differentials will be converted to this unit (or taken
        to be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
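    Examples
    --------
    A sketch of the single-array construction mode (values arbitrary)::
        >>> import numpy as np
        >>> import astropy.units as u
        >>> from astropy.coordinates import CartesianDifferential
        >>> d = CartesianDifferential(np.arange(6.).reshape(3, 2),
        ...                           unit=u.km / u.s)
        >>> d.d_x.shape, d.get_d_xyz(xyz_axis=1).shape
        ((2,), (2, 3))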
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None,
copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
"are in a single array passed in through d_x, "
"i.e., d_y and d_z should not be not given.")
if d_y is None or d_z is None:
raise ValueError("d_x, d_y, and d_z are required to instantiate {0}"
.format(self.__class__.__name__))
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
self._d_x.unit.is_equivalent(self._d_z.unit)):
raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c
in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return _combine_xyz(self._d_x, self._d_y, self._d_z, xyz_axis=xyz_axis)
d_xyz = property(get_d_xyz)
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
    d_lon, d_lat : `~astropy.units.Quantity`
        The differential longitude and latitude.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
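    Examples
    --------
    A proper-motion-like sketch; converting to the CosLat variant needs
    the base to supply the latitude (values are illustrative)::
        >>> import astropy.units as u
        >>> from astropy.coordinates import (UnitSphericalRepresentation,
        ...                                  UnitSphericalDifferential,
        ...                                  UnitSphericalCosLatDifferential)
        >>> base = UnitSphericalRepresentation(0. * u.deg, 60. * u.deg)
        >>> diff = UnitSphericalDifferential(2. * u.mas / u.yr,
        ...                                  1. * u.mas / u.yr)
        >>> coslat = diff.represent_as(UnitSphericalCosLatDifferential, base)
        >>> round(float(coslat.d_lon_coslat / (u.mas / u.yr)), 6)
        1.0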
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(representation, (SphericalCosLatDifferential,
UnitSphericalCosLatDifferential)):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
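    Examples
    --------
    A sketch of a conversion that needs no base; the sign flip follows
    from the physics convention for the inclination::
        >>> import astropy.units as u
        >>> from astropy.coordinates import (SphericalDifferential,
        ...                                  PhysicsSphericalDifferential)
        >>> d = SphericalDifferential(1. * u.mas / u.yr, 2. * u.mas / u.yr,
        ...                           3. * u.km / u.s)
        >>> p = d.represent_as(PhysicsSphericalDifferential)
        >>> bool(p.d_theta == -d.d_lat)
        True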
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat, d_distance, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat,
self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differtials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalCosLatDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
    d_lon_coslat, d_lat : `~astropy.units.Quantity`
        The differential longitude (with cos(lat) included) and latitude.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = UnitSphericalRepresentation
attr_classes = OrderedDict([('d_lon_coslat', u.Quantity),
('d_lat', u.Quantity)])
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
        # though those without CosLat need base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(representation, (SphericalDifferential,
UnitSphericalDifferential)):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = OrderedDict([('d_lon_coslat', u.Quantity),
('d_lat', u.Quantity),
('d_distance', u.Quantity)])
def __init__(self, d_lon_coslat, d_lat, d_distance, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat,
representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
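    Examples
    --------
    A one-component sketch::
        >>> import astropy.units as u
        >>> from astropy.coordinates import RadialDifferential
        >>> RadialDifferential(10. * u.km / u.s).d_distance.unit
        Unit("km / s")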
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
return self.d_distance * base.represent_as(
UnitSphericalRepresentation).to_cartesian()
@classmethod
def from_cartesian(cls, other, base):
return cls(other.dot(base.represent_as(UnitSphericalRepresentation)),
copy=False)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(representation, (SphericalDifferential,
SphericalCosLatDifferential)):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(other, (BaseSphericalDifferential,
BaseSphericalCosLatDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta, d_r, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError('d_phi and d_theta should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, -representation.d_lat,
representation.d_distance)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity`
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity`
The differential azimuth.
d_z : `~astropy.units.Quantity`
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = CylindricalRepresentation
    def __init__(self, d_rho, d_phi, d_z, copy=True):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
|
9102d5ee4794186964b72991673798f09e3e13fe6fb752b482cedc496c627266 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for getting a coordinate object
for a named object by querying SESAME and getting the first returned result.
Note that this is intended to be a convenience, and is very simple. If you
need precise coordinates for an object you should find the appropriate
reference for that measurement and input the coordinates manually.
"""
# Standard library
import os
import re
import socket
import urllib.request
import urllib.parse
import urllib.error
# Astropy
from .. import units as u
from .sky_coordinate import SkyCoord
from ..utils import data
from ..utils.state import ScienceState
__all__ = ["get_icrs_coordinates"]
class sesame_url(ScienceState):
"""
The URL(s) to Sesame's web-queryable database.
"""
_value = ["http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/",
"http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/"]
@classmethod
def validate(cls, value):
# TODO: Implement me
return value
class sesame_database(ScienceState):
"""
This specifies the default database that SESAME will query when
using the name resolve mechanism in the coordinates
subpackage. Default is to search all databases, but this can be
'all', 'simbad', 'ned', or 'vizier'.
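    Examples
    --------
    Best used as a context manager, so the setting is scoped (a sketch;
    no query is actually made here)::
        >>> from astropy.coordinates.name_resolve import sesame_database
        >>> with sesame_database.set('simbad'):
        ...     pass  # name lookups here would query SIMBAD only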
"""
_value = 'all'
@classmethod
def validate(cls, value):
if value not in ['all', 'simbad', 'ned', 'vizier']:
raise ValueError("Unknown database '{0}'".format(value))
return value
class NameResolveError(Exception):
pass
def _parse_response(resp_data):
"""
Given a string response from SESAME, parse out the coordinates by looking
    for a line starting with ``%J``, meaning ICRS J2000 coordinates.
Parameters
----------
resp_data : str
The string HTTP response from SESAME.
Returns
-------
ra : str
The string Right Ascension parsed from the HTTP response.
dec : str
The string Declination parsed from the HTTP response.
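    Examples
    --------
    With a fabricated response fragment (the coordinates are illustrative
    only)::
        >>> _parse_response(b"%J 83.822083 -5.391111 = fabricated")
        ('83.822083', '-5.391111')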
"""
pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
matched = pattr.search(resp_data.decode('utf-8'))
if matched is None:
return None, None
else:
ra, dec = matched.groups()
return ra, dec
def get_icrs_coordinates(name):
"""
Retrieve an ICRS object by using an online name resolving service to
retrieve coordinates for the specified name. By default, this will
search all available databases until a match is found. If you would like
to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier. The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving
up.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
Returns
-------
coord : `astropy.coordinates.ICRS` object
The object's coordinates in the ICRS frame.
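    Examples
    --------
    This requires network access, so only a sketch is shown (the output
    is abbreviated)::
        >>> from astropy.coordinates import get_icrs_coordinates
        >>> get_icrs_coordinates('M42')  # doctest: +SKIP
        <SkyCoord (ICRS): (ra, dec) in deg
            (83.82208, -5.39111)>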
"""
database = sesame_database.get()
# The web API just takes the first letter of the database name
db = database.upper()[0]
# Make sure we don't have duplicates in the url list
urls = []
domains = []
for url in sesame_url.get():
domain = urllib.parse.urlparse(url).netloc
# Check for duplicates
if domain not in domains:
domains.append(domain)
# Add the query to the end of the url, add to url list
fmt_url = os.path.join(url, "{db}?{name}")
fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
urls.append(fmt_url)
exceptions = []
for url in urls:
try:
# Retrieve ascii name resolve data from CDS
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = resp.read()
break
except urllib.error.URLError as e:
exceptions.append(e)
continue
except socket.timeout as e:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
e.reason = "Request took longer than the allowed {:.1f} " \
"seconds".format(data.conf.remote_timeout)
exceptions.append(e)
continue
    # All Sesame URLs failed...
else:
messages = ["{url}: {e.reason}".format(url=url, e=e)
for url, e in zip(urls, exceptions)]
raise NameResolveError("All Sesame queries failed. Unable to "
"retrieve coordinates. See errors per URL "
"below: \n {}".format("\n".join(messages)))
ra, dec = _parse_response(resp_data)
if ra is None and dec is None:
if db == "A":
err = "Unable to find coordinates for name '{0}'".format(name)
else:
err = "Unable to find coordinates for name '{0}' in database {1}"\
.format(name, database)
raise NameResolveError(err)
# Return SkyCoord object
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
return sc
|
394ac5fcabbf3da18d6baa2aee0a1404327470bfc6cc3f25b475eedc05c7eb90 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from warnings import warn
from abc import ABCMeta, abstractmethod
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
import numpy as np
from .. import units as u
from ..utils.exceptions import AstropyWarning
from .representation import REPRESENTATION_CLASSES
from .matrix_utilities import matrix_product
__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
'BaseAffineTransform', 'AffineTransform',
'StaticMatrixTransform', 'DynamicMatrixTransform',
'FunctionTransformWithFiniteDifference', 'CompositeTransform']
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
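    Examples
    --------
    A sketch with two dummy node classes; real use goes through
    `~astropy.coordinates.BaseCoordinateFrame` subclasses, but any classes
    providing the two attributes the graph bookkeeping inspects will do::
        >>> from astropy.coordinates.transformations import TransformGraph
        >>> class A:
        ...     frame_attributes = {}
        ...     _frame_specific_representation_info = {}
        >>> class B(A):
        ...     pass
        >>> graph = TransformGraph()
        >>> graph.add_transform(A, B, lambda fromcoord, toframe: None)
        >>> path, distance = graph.find_shortest_path(A, B)
        >>> [cls.__name__ for cls in path], distance
        (['A', 'B'], 1.0)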
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, 'name', None)
if nm is not None:
dct[nm] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this `TransformGraph`.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this
`TransformGraph`.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this `TransformGraph`.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""
Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : CoordinateTransform or similar callable
The transformation object. Typically a `CoordinateTransform` object,
although it may be some other callable that is called with the same
signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
if not callable(transform):
raise TypeError('transform must be callable')
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError("Frame(s) {0} contain invalid attribute names: {1}"
"\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
.format(list(invalid_frames), invalid_attrs))
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or `None`
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or `None`
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or `None`
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError('fromsys and tosys must both be None if either are')
if transform is None:
raise ValueError('cannot give all Nones to remove_transform')
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if b is transform:
del agraph[b]
break
else:
raise ValueError('Could not find transform {0} in the '
'graph'.format(transform))
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError('Current transform from {0} to {1} is not '
'{2}'.format(fromsys, tosys, transform))
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of classes or `None`
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : number
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float('inf')
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
if n2 not in result: # already visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError('n2 not in heap - this should be impossible!')
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""
Generates and returns the `CompositeTransform` for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `CompositeTransform` or `None`
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
This function always returns a `CompositeTransform`, because
`CompositeTransform` is slightly more adaptable in the way it can be
called than other transform classes. Specifically, it takes care of
intermediate steps of transformations in a way that is consistent with
1-hop transformations.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys is not a class')
if not inspect.isclass(tosys):
raise TypeError('tosys is not a class')
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(transforms, fromsys, tosys,
register_graph=False)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
coordcls
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
savelayout='plain', saveformat=None, color_edges=True):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : `None` or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
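        Examples
        --------
        Saving a rendered diagram requires graphviz_ to be installed, so
        only a sketch::
            >>> dot = graph.to_dot_graph()  # doctest: +SKIP
            >>> graph.to_dot_graph(savefn='transforms.svg',  # doctest: +SKIP
            ...                    savelayout='dot', saveformat='svg')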
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = dict([(v, k) for k, v in self._cached_names.items()])
for n in nodes:
if n in invclsaliases:
nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, invclsaliases[n]))
else:
nodenames.append(n.__name__ + '[ shape=oval ]')
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__] if color_edges else 'black'
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ['digraph AstropyCoordinateTransformGraph {']
lines.append('; '.join(nodenames) + ';')
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = '[ {0} {1} ]'
if priorities:
priority_part = 'label = "{0}"'.format(weights)
else:
priority_part = ''
color_part = 'color = "{0}"'.format(color)
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr))
lines.append('')
lines.append('overlap=false')
lines.append('}')
dotgraph = '\n'.join(lines)
if savefn is not None:
if savelayout == 'plain':
with open(savefn, 'w') as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append('-T' + saveformat)
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
                # graphviz reads bytes from stdin and yields bytes on
                # stdout/stderr, so encode/decode around the pipes.
                stdout, stderr = proc.communicate(dotgraph.encode('utf-8'))
                if proc.returncode != 0:
                    raise OSError('problem running graphviz: \n' +
                                  stderr.decode('utf-8'))
                with open(savefn, 'wb') as f:
                    f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <http://networkx.lanl.gov/>`_
package installed for this to work.
Returns
-------
nxgraph : `networkx.Graph <http://networkx.lanl.gov/reference/classes.graph.html>`_
This `TransformGraph` as a `networkx.Graph`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, 'priority') else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""
A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third
are ``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use `add_transform` instead of
using this decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(func, fromsys, tosys, priority=priority,
register_graph=self, **kwargs)
return func
return deco
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError('fromsys and tosys must be classes')
self.overlapping_frame_attr_names = overlap = []
if (hasattr(fromsys, 'get_frame_attr_names') and
hasattr(tosys, 'get_frame_attr_names')):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.get_frame_attr_names():
if from_nm in tosys.get_frame_attr_names():
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : a TransformGraph object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : fromsys object
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary that ``tosys.get_frame_attr_names()``
returns. Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : tosys object
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError('func must be callable')
        with suppress(TypeError):
            sig = signature(func)
            kinds = [x.kind for x in sig.parameters.values()]
            # Count the parameters the transform machinery can pass
            # positionally; it always calls ``func(fromcoord, toframe)``.
            npos = len([x for x in kinds
                        if x in (inspect.Parameter.POSITIONAL_ONLY,
                                 inspect.Parameter.POSITIONAL_OR_KEYWORD)])
            if npos < 2 and inspect.Parameter.VAR_POSITIONAL not in kinds:
                raise ValueError('provided function does not accept two arguments')
self.func = func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError('the transformation function yielded {0} but '
'should have been of type {1}'.format(res, self.tosys))
if fromcoord.data.differentials and not res.data.differentials:
warn("Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.", AstropyWarning)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""
A coordinate transformation that works like a `FunctionTransform`, but
computes velocity shifts based on the finite-difference relative to one of
the frame attributes. Note that the transform function should *not* change
the differential at all in this case, as any differentials will be
overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first part is simply the existing
    differential, re-oriented (using finite-difference techniques) to
point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
        re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
        If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
        if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The former
        is more accurate, but the latter requires fewer evaluations of the
        transform function and hence has slightly better performance.
All other parameters are identical to the initializer for
`FunctionTransform`.
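    Examples
    --------
    The accuracy tradeoff is the usual one for finite differences; e.g.,
    for :math:`x(t) = t^2` at :math:`t = 1` (a plain-python sketch)::
        >>> f = lambda t: t ** 2
        >>> dt = 0.1
        >>> round((f(1 + dt / 2) - f(1 - dt / 2)) / dt, 6)  # symmetric
        2.0
        >>> round((f(1 + dt) - f(1)) / dt, 6)  # one-sided
        2.1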
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None,
finite_difference_frameattr_name='obstime',
finite_difference_dt=1*u.second,
symmetric_finite_difference=True):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError('Frame attribute name {} is not a frame '
'attribute of {} or {}'.format(value,
self.fromsys,
self.tosys))
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import (CartesianRepresentation,
CartesianDifferential)
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt/2
from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials())
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*halfdt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
backxyz = (fromcoord_cart.xyz -
fromcoord_cart.differentials['s'].d_xyz*halfdt)
back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe)
else:
fwdxyz = (fromcoord_cart.xyz +
fromcoord_cart.differentials['s'].d_xyz*dt)
fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
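# A sketch of typical registration (hypothetical frames): extra keyword
# arguments to the decorator are forwarded to this class, so the finite
# difference can be taken with respect to, e.g., ``obstime``:
#
#     @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
#                                      MyFrame1, MyFrame2,
#                                      finite_difference_frameattr_name='obstime')
#     def myframe1_to_myframe2(fromcoord, toframe):
#         newrep = ...  # positions only; differentials are handled above
#         return toframe.realize_frame(newrep)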
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because ``AffineTransform`` and the matrix
transform classes share the ``_apply_transform()`` method, but have
different ``__call__()`` methods. ``StaticMatrixTransform`` passes in a
    matrix stored as an instance attribute, and both of the matrix transforms pass
in ``None`` for the offset. Hence, user subclasses would likely want to
subclass this (rather than ``AffineTransform``) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (UnitSphericalRepresentation,
CartesianDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential)
data = fromcoord.data
has_velocity = 's' in data.differentials
# list of unit differentials
_unit_diffs = (SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential)
unit_vel_diff = (has_velocity and
isinstance(data.differentials['s'], _unit_diffs))
rad_vel_diff = (has_velocity and
isinstance(data.differentials['s'], RadialDifferential))
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError("Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {0})"
.format(data.__class__))
elif (has_velocity and (unit_vel_diff or rad_vel_diff) and
offset is not None and 's' in offset.differentials):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError("Velocity information stored on coordinate frame "
"is insufficient to do a full-space velocity "
"transformation (differential class: {0})"
.format(data.differentials['s'].__class__))
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError("Representation passed to AffineTransform contains"
" multiple associated differentials. Only a single"
" differential with velocity units is presently"
" supported (differentials: {0})."
.format(str(data.differentials)))
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (has_velocity and isinstance(data, UnitSphericalRepresentation) and
not unit_vel_diff and not rad_vel_diff):
# retrieve just velocity differential
unit_diff = data.differentials['s'].represent_as(
data.differentials['s']._unit_differential, data)
data = data.with_differentials({'s': unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = dict([(k, diff.represent_as(CartesianDifferential, data))
for k, diff in data.differentials.items()])
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = (rep.without_differentials() +
offset.without_differentials())
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials['s'] # already in Cartesian form
if offset is not None and 's' in offset.differentials:
veldiff = veldiff + offset.differentials['s']
newrep = newrep.with_differentials({'s': veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials['s']
_unit_cls = fromcoord.data.differentials['s']._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = dict([(comp, getattr(newdiff, comp))
for comp in newdiff.components])
kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance
diffs = {'s': fromcoord.data.differentials['s'].__class__(
copy=False, **kwargs)}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials['s'].represent_as(
fromcoord.data.differentials['s'].__class__, newrep)
diffs = {'s': newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials['s'].__class__
newrep = newrep.represent_as(fromcoord.data.__class__,
diff_cls._dimensional_differential)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials(
{'s': fromcoord.data.differentials['s']})
return newrep
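    # In the simple full-space case, the method above reduces to the usual
    # affine relations (a sketch; M is the 3 x 3 matrix, o and o_v are the
    # positional and velocity parts of ``offset``):
    #
    #     r' = M @ r + o
    #     v' = M @ v + o_v
    #
    # most of the code is bookkeeping for unit-spherical and radial-only
    # representations/differentials, which lack full-space information.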
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
    cartesian transformation matrix and an offset of position (and,
    optionally, velocity).
See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
"""
def __init__(self, transform_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(transform_func):
raise TypeError('transform_func is not callable')
self.transform_func = transform_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
M, vec = self.transform_func(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, M, vec)
return toframe.realize_frame(newrep)
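# A sketch of a transform function suitable for AffineTransform (hypothetical
# frames; the identity matrix and offsets are placeholders):
#
#     @frame_transform_graph.transform(AffineTransform, MyFrame1, MyFrame2)
#     def myframe1_to_myframe2(fromcoord, toframe):
#         M = np.eye(3)
#         offset = CartesianRepresentation(
#             [1, 0, 0] * u.kpc,
#             differentials=CartesianDifferential([0, 0, 10] * u.km/u.s))
#         return M, offset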
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError('Provided matrix is not 3 x 3')
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
newrep = self._apply_transform(fromcoord, self.matrix, None)
return toframe.realize_frame(newrep)
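# A sketch of a static-matrix registration (hypothetical frames). For frames
# related by a fixed rotation, ``rotation_matrix`` from
# ``astropy.coordinates.matrix_utilities`` is a convenient way to build one:
#
#     from astropy.coordinates.matrix_utilities import rotation_matrix
#     mat = rotation_matrix(30 * u.deg, 'z')
#     StaticMatrixTransform(mat, MyFrame1, MyFrame2,
#                           register_graph=frame_transform_graph)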
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1,
register_graph=None):
if not callable(matrix_func):
raise TypeError('matrix_func is not callable')
self.matrix_func = matrix_func
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
def __call__(self, fromcoord, toframe):
M = self.matrix_func(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, M, None)
return toframe.realize_frame(newrep)
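# A sketch of a dynamic-matrix registration (hypothetical frames and matrix
# helper), where the matrix depends on frame attributes such as ``equinox``:
#
#     @frame_transform_graph.transform(DynamicMatrixTransform, MyFrame1, MyFrame2)
#     def myframe1_to_myframe2(fromcoord, toframe):
#         return my_precession_matrix(fromcoord.equinox, toframe.equinox)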
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
        If `True`, consecutive `StaticMatrixTransform` objects will be
        collapsed into a single transformation to speed up the calculation.
"""
def __init__(self, transforms, fromsys, tosys, priority=1,
register_graph=None, collapse_static_mats=True):
super().__init__(fromsys, tosys, priority=priority,
register_graph=register_graph)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
        Combines consecutive `StaticMatrixTransform` elements of ``transforms``
        into single transforms, returning the new list of transforms.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if (isinstance(lasttrans, StaticMatrixTransform) and
isinstance(currtrans, StaticMatrixTransform)):
combinedmat = matrix_product(currtrans.matrix, lasttrans.matrix)
newtrans[-1] = StaticMatrixTransform(combinedmat,
lasttrans.fromsys,
currtrans.tosys)
else:
newtrans.append(currtrans)
return newtrans
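    # Note on the collapse above: ``rep.transform`` applies matrices to column
    # vectors, so a transform A applied first followed by B applied second is
    # equivalent to the single matrix B @ A, which is what
    # matrix_product(currtrans.matrix, lasttrans.matrix) computes.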
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
            # build an intermediate frame with attributes taken from either
            # `toframe`, or if not there, `fromcoord`, or if not there, the
            # defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.get_frame_attr_names():
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
        # coordinate objects are immutable, so copying is not needed
return curr_coord
# map class names to colorblind-safe colors
trans_to_color = OrderedDict()
trans_to_color[AffineTransform] = '#555555' # gray
trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish
trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish
trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish
|
5c07bd3beab4c5a22ba3d0f8773e6f239221bc6ed1dc9456b8d20624915a5725 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import numpy as np
from .. import units as u
from .angles import Angle
__all__ = ['Distance']
__doctest_requires__ = {'*': ['scipy.integrate']}
class Distance(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
This can be initialized in one of four ways:
* A distance ``value`` (array or float) and a ``unit``
* A `~astropy.units.Quantity` object
* A redshift and (optionally) a cosmology.
* Providing a distance modulus
Parameters
----------
value : scalar or `~astropy.units.Quantity`.
The value of this distance.
unit : `~astropy.units.UnitBase`
The units for this distance, *if* ``value`` is not a
`~astropy.units.Quantity`. Must have dimensions of distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : ``Cosmology`` or `None`
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
        Whether to allow negative distances (which are possible in some
cosmologies). Default: ``False``.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a distance.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``z`` is provided with a ``unit`` or ``cosmology`` is provided
when ``z`` is *not* given, or ``value`` is given as well as ``z``.
Examples
--------
>>> from astropy import units as u
    >>> from astropy.cosmology import WMAP5
>>> d1 = Distance(10, u.Mpc)
>>> d2 = Distance(40, unit=u.au)
>>> d3 = Distance(value=5, unit=u.kpc)
>>> d4 = Distance(z=0.23)
>>> d5 = Distance(z=0.23, cosmology=WMAP5)
>>> d6 = Distance(distmod=24.47)
>>> d7 = Distance(Distance(10 * u.Mpc))
>>> d8 = Distance(parallax=21.34*u.mas)
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(cls, value=None, unit=None, z=None, cosmology=None,
distmod=None, parallax=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0, allow_negative=False):
if z is not None:
if value is not None or distmod is not None:
                raise ValueError('Should give only one of `value`, `z` '
                                 'or `distmod` in Distance constructor.')
if cosmology is None:
from ..cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
else:
if cosmology is not None:
raise ValueError('A `cosmology` was given but `z` was not '
'provided in Distance constructor')
        value_msg = ('Should give only one of `value`, `z`, `distmod`, or '
'`parallax` in Distance constructor.')
n_not_none = np.sum([x is not None
for x in [value, z, distmod, parallax]])
if n_not_none > 1:
raise ValueError(value_msg)
if distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the unit is not specified, guess based on the mean of
# the log of the distance
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
unit = u.Mpc
elif meanlogval > 3:
unit = u.kpc
elif meanlogval < -3: # ~200 AU
unit = u.AU
else:
unit = u.pc
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
elif parallax is not None:
value = parallax.to(u.pc, equivalencies=u.parallax()).value
unit = u.pc
# Continue on to take account of unit and other arguments
# but a copy is already made, so no longer necessary
copy = False
elif value is None:
raise ValueError('None of `value`, `z`, `distmod`, or '
'`parallax` were given to Distance '
'constructor')
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls, value, unit, dtype=dtype, copy=copy, order=order,
subok=subok, ndmin=ndmin)
if not allow_negative and np.any(distance.value < 0):
raise ValueError("Distance must be >= 0. Use the argument "
"'allow_negative=True' to allow negative values.")
return distance
@property
def z(self):
"""Short for ``self.compute_z()``"""
return self.compute_z()
def compute_z(self, cosmology=None):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : ``Cosmology`` or `None`
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
Returns
-------
z : float
The redshift of this distance given the provided ``cosmology``.
"""
if cosmology is None:
from ..cosmology import default_cosmology
cosmology = default_cosmology.get()
from ..cosmology import z_at_value
return z_at_value(cosmology.luminosity_distance, self, ztol=1.e-10)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`"""
val = 5. * np.log10(self.to_value(u.pc)) - 5.
return u.Quantity(val, u.mag, copy=False)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object"""
return Angle(self.to(u.milliarcsecond, u.parallax()))
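# Quick numerical sketches of the relations implemented above:
#
#     Distance(distmod=10)           # -> 1000 pc, since d = 10**((dm + 5)/5) pc
#     Distance(parallax=1*u.arcsec)  # -> 1 pc, since d[pc] = 1/parallax[arcsec]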
|
e07282fa43e126bb6293ec0f82a77334ab0bc26a7a39a6a4cf6df01528a66b4e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
import numpy as np
from .. import units as u
from ..constants import c
from .. import _erfa as erfa
from ..io import ascii
from ..utils import isiterable, data
from .sky_coordinate import SkyCoord
from .builtin_frames import GCRS, PrecessedGeocentric
from .representation import SphericalRepresentation, CartesianRepresentation
from .builtin_frames.utils import get_jd12
__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
'concatenate', 'get_constellation']
def cartesian_to_spherical(x, y, z):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
    Note that the resulting angles are in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This function simply wraps functionality provided by the
`~astropy.coordinates.CartesianRepresentation` and
`~astropy.coordinates.SphericalRepresentation` classes. In general,
for both performance and readability, we suggest using these classes
directly. But for situations where a quick one-off conversion makes
sense, this function is provided.
Parameters
----------
x : scalar, array-like, or `~astropy.units.Quantity`
The first cartesian coordinate.
y : scalar, array-like, or `~astropy.units.Quantity`
The second cartesian coordinate.
z : scalar, array-like, or `~astropy.units.Quantity`
The third cartesian coordinate.
Returns
-------
r : `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : `~astropy.units.Quantity`
The latitude in radians
lon : `~astropy.units.Quantity`
The longitude in radians
"""
if not hasattr(x, 'unit'):
x = x * u.dimensionless_unscaled
if not hasattr(y, 'unit'):
y = y * u.dimensionless_unscaled
if not hasattr(z, 'unit'):
z = z * u.dimensionless_unscaled
cart = CartesianRepresentation(x, y, z)
sph = cart.represent_as(SphericalRepresentation)
return sph.distance, sph.lat, sph.lon
def spherical_to_cartesian(r, lat, lon):
"""
Converts spherical polar coordinates to rectangular cartesian
coordinates.
Note that the input angles should be in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This is a low-level function used internally in
`astropy.coordinates`. It is provided for users if they really
want to use it, but it is recommended that you use the
`astropy.coordinates` coordinate systems.
Parameters
----------
r : scalar, array-like, or `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : scalar, array-like, or `~astropy.units.Quantity`
The latitude (in radians if array or scalar)
lon : scalar, array-like, or `~astropy.units.Quantity`
The longitude (in radians if array or scalar)
Returns
-------
x : float or array
The first cartesian coordinate.
y : float or array
The second cartesian coordinate.
z : float or array
The third cartesian coordinate.
"""
if not hasattr(r, 'unit'):
r = r * u.dimensionless_unscaled
if not hasattr(lat, 'unit'):
lat = lat * u.radian
if not hasattr(lon, 'unit'):
lon = lon * u.radian
sph = SphericalRepresentation(distance=r, lat=lat, lon=lon)
cart = sph.represent_as(CartesianRepresentation)
return cart.x, cart.y, cart.z
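# A round-trip sketch of the two helpers above (returned angles are
# `~astropy.units.Quantity` objects in radians):
#
#     x, y, z = spherical_to_cartesian(1 * u.kpc, 0 * u.deg, 90 * u.deg)
#     # -> x ~ 0 kpc, y = 1 kpc, z = 0 kpc
#     r, lat, lon = cartesian_to_spherical(x, y, z)
#     # -> r = 1 kpc, lat = 0 rad, lon ~ pi/2 rad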
def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
    250 km over the 1000-3000 span.
"""
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
# We have to manually do aberration because we're outputting directly into
# GCRS
earth_p = earth_pv_helio['p']
earth_v = earth_pv_bary['v']
# convert barycentric velocity to units of c, but keep as array for passing in to erfa
earth_v /= c.to_value(u.au/u.d)
dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
-earth_v, dsun, invlorentz)
cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
y=-dsun*properdir[..., 1] * u.AU,
z=-dsun*properdir[..., 2] * u.AU)
return SkyCoord(cartrep, frame=GCRS(obstime=time))
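# Usage sketch:
#
#     from astropy.time import Time
#     sun = get_sun(Time('2000-01-01'))
#     sun.ra, sun.dec, sun.distance  # apparent geocentric position of the Sun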
def concatenate(coords):
"""
Combine multiple coordinate objects into a single
`~astropy.coordinates.SkyCoord`.
"Coordinate objects" here mean frame objects with data,
`~astropy.coordinates.SkyCoord`, or representation objects. Currently,
they must all be in the same frame, but in a future version this may be
    relaxed to allow inhomogeneous sequences of objects.
Parameters
----------
coords : sequence of coordinate objects
The objects to concatenate
Returns
-------
cskycoord : SkyCoord
A single sky coordinate with its data set to the concatenation of all
the elements in ``coords``
"""
if getattr(coords, 'isscalar', False) or not isiterable(coords):
raise TypeError('The argument to concatenate must be iterable')
return SkyCoord(coords)
# global dictionary that caches repeatedly-needed info for get_constellation
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) a given coordinate object contains.
Parameters
----------
coord : coordinate object
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
        If ``coord`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
"""
if constellation_list != 'iau':
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
cnames_short_to_long = dict([(l[:3], l[4:])
for l in cnames.split('\n')
if not l.startswith('#')])
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])
_constellation_data['ctable'] = ctable
_constellation_data['cnames_long'] = cnames_long
else:
ctable = _constellation_data['ctable']
cnames_long = _constellation_data['cnames_long']
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError('Could not find constellation for coordinates {0}'.format(constel_coord[notided]))
if short_name:
names = ctable['name'][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
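# Usage sketch (the vernal-equinox point lies in Pisces):
#
#     from astropy.coordinates import SkyCoord
#     get_constellation(SkyCoord(0 * u.hourangle, 0 * u.deg))  # -> 'Pisces'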
|
81f101322384575553c385ba3b394a5c242b94181608cde2c847609abefd1261 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
# Standard library
import abc
import copy
import inspect
from collections import namedtuple, OrderedDict, defaultdict
import warnings
# Dependencies
import numpy as np
# Project
from ..utils.compat.misc import override__dir__
from ..utils.decorators import lazyproperty, format_doc
from ..utils.exceptions import AstropyWarning
from .. import units as u
from ..utils import (OrderedDescriptorContainer, ShapedLikeNDArray,
check_broadcast)
from .transformations import TransformGraph
from . import representation as r
from .angles import Angle
from .attributes import Attribute
# Import old names for Attributes so we don't break backwards-compatibility
# (some users rely on them being here, although that is not encouraged, as this
# is not the public API location -- see attributes.py).
from .attributes import (
TimeFrameAttribute, QuantityFrameAttribute,
EarthLocationAttribute, CoordinateAttribute,
CartesianRepresentationFrameAttribute) # pylint: disable=W0611
__all__ = ['BaseCoordinateFrame', 'frame_transform_graph',
'GenericFrame', 'RepresentationMapping']
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in r.REPRESENTATION_CLASSES:
value = r.REPRESENTATION_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseRepresentation)):
raise ValueError(
'Representation is {0!r} but must be a BaseRepresentation class '
'or one of the string aliases {1}'.format(
value, list(r.REPRESENTATION_CLASSES)))
return value
def _get_diff_cls(value):
"""
Return a valid differential class from ``value`` or raise exception.
As originally created, this is only used in the SkyCoord initializer, so if
    that is refactored, this function may no longer be necessary.
"""
if value in r.DIFFERENTIAL_CLASSES:
value = r.DIFFERENTIAL_CLASSES[value]
elif (not isinstance(value, type) or
not issubclass(value, r.BaseDifferential)):
raise ValueError(
'Differential is {0!r} but must be a BaseDifferential class '
'or one of the string aliases {1}'.format(
value, list(r.DIFFERENTIAL_CLASSES)))
return value
def _get_repr_classes(base, **differentials):
"""Get valid representation and differential classes.
Parameters
----------
base : str or `~astropy.coordinates.BaseRepresentation` subclass
class for the representation of the base coordinates. If a string,
it is looked up among the known representation classes.
**differentials : dict of str or `~astropy.coordinates.BaseDifferentials`
Keys are like for normal differentials, i.e., 's' for a first
derivative in time, etc. If an item is set to `None`, it will be
guessed from the base class.
Returns
-------
repr_classes : dict of subclasses
The base class is keyed by 'base'; the others by the keys of
        ``differentials``.
"""
base = _get_repr_cls(base)
repr_classes = {'base': base}
for name, differential_type in differentials.items():
if differential_type == 'base':
# We don't want to fail for this case.
differential_type = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None)
elif differential_type in r.DIFFERENTIAL_CLASSES:
differential_type = r.DIFFERENTIAL_CLASSES[differential_type]
elif (differential_type is not None and
(not isinstance(differential_type, type) or
not issubclass(differential_type, r.BaseDifferential))):
raise ValueError(
'Differential is {0!r} but must be a BaseDifferential class '
'or one of the string aliases {1}'.format(
differential_type, list(r.DIFFERENTIAL_CLASSES)))
repr_classes[name] = differential_type
return repr_classes
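# For example (sketch), string aliases resolve through the class registries:
#
#     _get_repr_classes('spherical', s='base')
#     # -> {'base': SphericalRepresentation, 's': SphericalDifferential}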
def _normalize_representation_type(kwargs):
""" This is added for backwards compatibility: if the user specifies the
old-style argument ``representation``, add it back in to the kwargs dict
as ``representation_type``.
"""
if 'representation' in kwargs:
if 'representation_type' in kwargs:
raise ValueError("Both `representation` and `representation_type` "
"were passed to a frame initializer. Please use "
"only `representation_type` (`representation` is "
"now pending deprecation).")
kwargs['representation_type'] = kwargs.pop('representation')
# Need to subclass ABCMeta as well, so that this meta class can be combined
# with ShapedLikeNDArray below (which is an ABC); without it, one gets
# "TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases"
class FrameMeta(OrderedDescriptorContainer, abc.ABCMeta):
def __new__(mcls, name, bases, members):
if 'default_representation' in members:
default_repr = members.pop('default_representation')
found_default_repr = True
else:
default_repr = None
found_default_repr = False
if 'default_differential' in members:
default_diff = members.pop('default_differential')
found_default_diff = True
else:
default_diff = None
found_default_diff = False
if 'frame_specific_representation_info' in members:
repr_info = members.pop('frame_specific_representation_info')
found_repr_info = True
else:
repr_info = None
found_repr_info = False
# somewhat hacky, but this is the best way to get the MRO according to
# https://mail.python.org/pipermail/python-list/2002-December/167861.html
tmp_cls = super().__new__(mcls, name, bases, members)
# now look through the whole MRO for the class attributes, raw for
# frame_attr_names, and leading underscore for others
for m in (c.__dict__ for c in tmp_cls.__mro__):
if not found_default_repr and '_default_representation' in m:
default_repr = m['_default_representation']
found_default_repr = True
if not found_default_diff and '_default_differential' in m:
default_diff = m['_default_differential']
found_default_diff = True
if (not found_repr_info and
'_frame_specific_representation_info' in m):
# create a copy of the dict so we don't mess with the contents
repr_info = m['_frame_specific_representation_info'].copy()
found_repr_info = True
if found_default_repr and found_default_diff and found_repr_info:
break
else:
raise ValueError(
'Could not find all expected BaseCoordinateFrame class '
'attributes. Are you mis-using FrameMeta?')
# Unless overridden via `frame_specific_representation_info`, velocity
# name defaults are (see also docstring for BaseCoordinateFrame):
# * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
# `SphericalCosLatDifferential` proper motion components
# * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
# motion components
# * ``radial_velocity`` for any `d_distance` component
# * ``v_{x,y,z}`` for `CartesianDifferential` velocity components
# where `{lon}` and `{lat}` are the frame names of the angular
# components.
if repr_info is None:
repr_info = {}
# the tuple() call below is necessary because if it is not there,
# the iteration proceeds in a difficult-to-predict manner in the
            # case that one of the class objects' hashes is such that it gets
# revisited by the iteration. The tuple() call prevents this by
# making the items iterated over fixed regardless of how the dict
# changes
for cls_or_name in tuple(repr_info.keys()):
if isinstance(cls_or_name, str):
# TODO: this provides a layer of backwards compatibility in
# case the key is a string, but now we want explicit classes.
cls = _get_repr_cls(cls_or_name)
repr_info[cls] = repr_info.pop(cls_or_name)
# The default spherical names are 'lon' and 'lat'
repr_info.setdefault(r.SphericalRepresentation,
[RepresentationMapping('lon', 'lon'),
RepresentationMapping('lat', 'lat')])
sph_component_map = {m.reprname: m.framename
for m in repr_info[r.SphericalRepresentation]}
repr_info.setdefault(r.SphericalCosLatDifferential, [
RepresentationMapping(
'd_lon_coslat',
'pm_{lon}_cos{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.SphericalDifferential, [
RepresentationMapping('d_lon',
'pm_{lon}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_lat',
'pm_{lat}'.format(**sph_component_map),
u.mas/u.yr),
RepresentationMapping('d_distance', 'radial_velocity',
u.km/u.s)
])
repr_info.setdefault(r.CartesianDifferential, [
RepresentationMapping('d_x', 'v_x', u.km/u.s),
RepresentationMapping('d_y', 'v_y', u.km/u.s),
RepresentationMapping('d_z', 'v_z', u.km/u.s)])
# Unit* classes should follow the same naming conventions
# TODO: this adds some unnecessary mappings for the Unit classes, so
# this could be cleaned up, but in practice doesn't seem to have any
# negative side effects
repr_info.setdefault(r.UnitSphericalRepresentation,
repr_info[r.SphericalRepresentation])
repr_info.setdefault(r.UnitSphericalCosLatDifferential,
repr_info[r.SphericalCosLatDifferential])
repr_info.setdefault(r.UnitSphericalDifferential,
repr_info[r.SphericalDifferential])
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
mcls.readonly_prop_factory(members, 'default_representation',
default_repr)
mcls.readonly_prop_factory(members, 'default_differential',
default_diff)
mcls.readonly_prop_factory(members,
'frame_specific_representation_info',
copy.deepcopy(repr_info))
# now set the frame name as lower-case class name, if it isn't explicit
if 'name' not in members:
members['name'] = name.lower()
return super().__new__(mcls, name, bases, members)
@staticmethod
def readonly_prop_factory(members, attr, value):
private_attr = '_' + attr
def getter(self):
return getattr(self, private_attr)
members[private_attr] = value
members[attr] = property(getter)
_RepresentationMappingBase = \
namedtuple('RepresentationMapping',
('reprname', 'framename', 'defaultunit'))
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (to use whatever
the representation's ``recommended_units`` is), or None (to indicate that
no unit mapping should be done).
"""
def __new__(cls, reprname, framename, defaultunit='recommended'):
# this trick just provides some defaults
return super().__new__(cls, reprname, framename, defaultunit)
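# For example (sketch), an equatorial frame maps the generic spherical
# component names onto ``ra``/``dec`` in its
# ``frame_specific_representation_info`` like this:
#
#     {r.SphericalRepresentation: [RepresentationMapping('lon', 'ra'),
#                                  RepresentationMapping('lat', 'dec')]}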
base_doc = """{__doc__}
Parameters
----------
data : `BaseRepresentation` subclass instance
A representation object or ``None`` to have no data (or use the
coordinate component arguments, see below).
{components}
representation_type : `BaseRepresentation` subclass, str, optional
A representation class or string name of a representation class. This
sets the expected input representation class, thereby changing the
expected keyword arguments for the data passed in. For example, passing
``representation_type='cartesian'`` will make the classes expect
position data with cartesian names, i.e. ``x, y, z`` in most cases.
differential_type : `BaseDifferential` subclass, str, dict, optional
A differential class or dictionary of differential classes (currently
only a velocity differential with key 's' is supported). This sets the
expected input differential class, thereby changing the expected keyword
arguments of the data passed in. For example, passing
``differential_type='cartesian'`` will make the classes expect velocity
data with the argument names ``v_x, v_y, v_z``.
copy : bool, optional
If `True` (default), make copies of the input coordinate arrays.
Can only be passed in as a keyword argument.
{footer}
"""
_components = """
*args, **kwargs
Coordinate components, with names that depend on the subclass.
"""
@format_doc(base_doc, components=_components, footer="")
class BaseCoordinateFrame(ShapedLikeNDArray, metaclass=FrameMeta):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `SphericalCosLatDifferential`
proper motion components
* ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper motion
components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
_inherit_descriptors_ = (Attribute,)
    # Default empty frame_attributes dict
    frame_attributes = OrderedDict()
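    # A minimal concrete subclass looks like the following sketch (the frame
    # name is hypothetical; real frames usually also declare Attribute
    # descriptors, e.g. a TimeAttribute for ``obstime``):
    #
    #     class MyFrame(BaseCoordinateFrame):
    #         default_representation = r.SphericalRepresentation
    #         default_differential = r.SphericalCosLatDifferential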
def __init__(self, *args, copy=True, representation_type=None,
differential_type=None, **kwargs):
self._attr_names_with_defaults = []
# This is here for backwards compatibility. It should be possible
# to use either the kwarg representation_type, or representation.
# TODO: In future versions, we will raise a deprecation warning here:
if representation_type is not None:
kwargs['representation_type'] = representation_type
_normalize_representation_type(kwargs)
representation_type = kwargs.pop('representation_type', representation_type)
if representation_type is not None or differential_type is not None:
if representation_type is None:
representation_type = self.default_representation
if (inspect.isclass(differential_type) and
issubclass(differential_type, r.BaseDifferential)):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {'s': differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {'s': diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {'s': self.default_differential}
else:
differential_type = {'s': 'base'} # see set_representation_cls()
self.set_representation_cls(representation_type,
**differential_type)
# if not set below, this is a frame with no data
representation_data = None
differential_data = None
args = list(args) # need to be able to pop them
if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or
args[0] is None):
representation_data = args.pop(0)
if len(args) > 0:
raise TypeError(
'Cannot create a frame with both a representation object '
'and other positional arguments')
if representation_data is not None:
diffs = representation_data.differentials
differential_data = diffs.get('s', None)
if ((differential_data is None and len(diffs) > 0) or
(differential_data is not None and len(diffs) > 1)):
raise ValueError('Multiple differentials are associated '
'with the representation object passed in '
'to the frame initializer. Only a single '
'velocity differential is supported. Got: '
'{0}'.format(diffs))
elif self.representation_type:
representation_cls = self.get_representation_cls()
# Get any representation data passed in to the frame initializer
# using keyword or positional arguments for the component names
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
# first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
# special-case the Spherical->UnitSpherical if no `distance`
if repr_kwargs:
# TODO: determine how to get rid of the part before the "try" -
# currently removing it has a performance regression for
# unitspherical because of the try-related overhead.
# Also frames have no way to indicate what the "distance" is
if repr_kwargs.get('distance', True) is None:
del repr_kwargs['distance']
if (issubclass(representation_cls, r.SphericalRepresentation)
and 'distance' not in repr_kwargs):
representation_cls = representation_cls._unit_representation
try:
representation_data = representation_cls(copy=copy,
**repr_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
try:
representation_data = representation_cls._unit_representation(copy=copy,
**repr_kwargs)
except Exception as e2:
msg = str(e)
names = self.get_representation_component_names()
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
'{0}()'.format(self.__class__.__name__))
e.args = (msg,)
raise e
# Now we handle the Differential data:
# Get any differential data passed in to the frame initializer
# using keyword or positional arguments for the component names
differential_cls = self.get_representation_cls('s')
diff_component_names = self.get_representation_component_names('s')
diff_kwargs = {}
for nmkw, nmrep in diff_component_names.items():
if len(args) > 0:
# first gather up positional args
diff_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
diff_kwargs[nmrep] = kwargs.pop(nmkw)
if diff_kwargs:
if (hasattr(differential_cls, '_unit_differential') and
'd_distance' not in diff_kwargs):
differential_cls = differential_cls._unit_differential
elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs:
differential_cls = r.RadialDifferential
try:
differential_data = differential_cls(copy=copy,
**diff_kwargs)
except TypeError as e:
# this except clause is here to make the names of the
# attributes more human-readable. Without this the names
# come from the representation instead of the frame's
# attribute names.
msg = str(e)
names = self.get_representation_component_names('s')
for frame_name, repr_name in names.items():
msg = msg.replace(repr_name, frame_name)
msg = msg.replace('__init__()',
'{0}()'.format(self.__class__.__name__))
e.args = (msg,)
raise
if len(args) > 0:
raise TypeError(
'{0}.__init__ had {1} remaining unhandled arguments'.format(
self.__class__.__name__, len(args)))
if representation_data is None and differential_data is not None:
raise ValueError("Cannot pass in differential component data "
"without positional (representation) data.")
if differential_data:
self._data = representation_data.with_differentials(
{'s': differential_data})
else:
self._data = representation_data # possibly None.
values = {}
for fnm, fdefault in self.get_frame_attr_names().items():
            # Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, '_' + fnm, value)
# Validate attribute by getting it. If the instance has data,
# this also checks its shape is OK. If not, we do it below.
values[fnm] = getattr(self, fnm)
else:
setattr(self, '_' + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
if kwargs:
raise TypeError(
'Coordinate frame got unexpected keywords: {0}'.format(
list(kwargs)))
# We do ``is None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is None:
# No data: we still need to check that any non-scalar attributes
# have consistent shapes. Collect them for all attributes with
# size > 1 (which should be array-like and thus have a shape).
shapes = {fnm: value.shape for fnm, value in values.items()
if getattr(value, 'size', 1) > 1}
if shapes:
if len(shapes) > 1:
try:
self._no_data_shape = check_broadcast(*shapes.values())
except ValueError:
raise ValueError(
"non-scalar attributes with inconsistent "
"shapes: {0}".format(shapes))
# Above, we checked that it is possible to broadcast all
# shapes. By getting and thus validating the attributes,
# we verify that the attributes can in fact be broadcast.
for fnm in shapes:
getattr(self, fnm)
else:
self._no_data_shape = shapes.popitem()[1]
else:
self._no_data_shape = ()
else:
# This makes the cache keys backwards-compatible, but also adds
# support for having differentials attached to the frame data
# representation object.
if 's' in self._data.differentials:
# TODO: assumes a velocity unit differential
key = (self._data.__class__.__name__,
self._data.differentials['s'].__class__.__name__,
False)
else:
key = (self._data.__class__.__name__, False)
# Set up representation cache.
self.cache['representation'][key] = self._data
@lazyproperty
def cache(self):
"""
Cache for this frame, a dict. It stores anything that should be
computed from the coordinate data (*not* from the frame attributes).
This can be used in functions to store anything that might be
expensive to compute but might be re-used by some other function.
E.g.::
if 'user_data' in myframe.cache:
data = myframe.cache['user_data']
else:
myframe.cache['user_data'] = data = expensive_func(myframe.lat)
If in-place modifications are made to the frame data, the cache should
be cleared::
myframe.cache.clear()
"""
return defaultdict(dict)
@property
def data(self):
"""
        The coordinate data for this object. If this frame has no data, a
        `ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError('The frame object "{0!r}" does not have '
'associated data'.format(self))
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
@property
def shape(self):
return self.data.shape if self.has_data else self._no_data_shape
# We have to override the ShapedLikeNDArray definitions, since our shape
# does not have to be that of the data.
def __len__(self):
return len(self.data)
def __bool__(self):
return self.has_data and self.size > 0
@property
def size(self):
return self.data.size
@property
def isscalar(self):
return self.has_data and self.data.isscalar
@classmethod
def get_frame_attr_names(cls):
return OrderedDict((name, getattr(cls, name))
for name in cls.frame_attributes)
def get_representation_cls(self, which='base'):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
if not hasattr(self, '_representation'):
self._representation = {'base': self.default_representation,
's': self.default_differential}
if which is not None:
return self._representation[which]
else:
return self._representation
def set_representation_cls(self, base=None, s='base'):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
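
        Examples
        --------
        A minimal sketch (an ICRS coordinate is used purely for
        illustration; any frame with data would do):

        >>> from astropy.coordinates import ICRS
        >>> import astropy.units as u
        >>> c = ICRS(1*u.deg, 2*u.deg)
        >>> c.set_representation_cls('cartesian')

        After this, component access such as ``c.x`` uses the new
        representation class.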
"""
if base is None:
base = self._representation['base']
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls, fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
        inferred), use the ``set_representation_cls`` method.
""")
@property
def differential_type(self):
"""
The differential used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseDifferential`.
For simultaneous setting of representation and differentials, see the
        ``set_representation_cls`` method.
"""
return self.get_representation_cls('s')
@differential_type.setter
def differential_type(self, value):
self.set_representation_cls(s=value)
# TODO: deprecate these?
@property
def representation(self):
return self.representation_type
@representation.setter
def representation(self, value):
self.representation_type = value
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
repr_attrs = {}
for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values()) +
list(r.DIFFERENTIAL_CLASSES.values())):
repr_attrs[repr_diff_cls] = {'names': [], 'units': []}
for c, c_cls in repr_diff_cls.attr_classes.items():
repr_attrs[repr_diff_cls]['names'].append(c)
# TODO: when "recommended_units" is removed, just directly use
# the default part here.
rec_unit = repr_diff_cls._recommended_units.get(
c, u.deg if issubclass(c_cls, Angle) else None)
repr_attrs[repr_diff_cls]['units'].append(rec_unit)
for repr_diff_cls, mappings in cls._frame_specific_representation_info.items():
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_diff_cls]['names']
uns = repr_attrs[repr_diff_cls]['units']
            comptomap = {m.reprname: m for m in mappings}
for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (isinstance(mapp.defaultunit, str) and
mapp.defaultunit == 'recommended'):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_diff_cls]['names'] = tuple(nms)
repr_attrs[repr_diff_cls]['units'] = tuple(uns)
return repr_attrs
@lazyproperty
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
def get_representation_component_names(self, which='base'):
out = OrderedDict()
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
data_names = repr_or_diff_cls.attr_classes.keys()
repr_names = self.representation_info[repr_or_diff_cls]['names']
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
def get_representation_component_units(self, which='base'):
out = OrderedDict()
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return out
repr_attrs = self.representation_info[repr_or_diff_cls]
repr_names = repr_attrs['names']
repr_units = repr_attrs['units']
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or `None`
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if inspect.isclass(data):
            raise TypeError('Class passed as data instead of a representation '
                            'instance. If you called frame.representation, this'
                            ' returns the representation class. frame.data '
                            'returns the instantiated object - you may want to '
                            'use this instead.')
if copy and data is not None:
data = data.copy()
for attr in self.get_frame_attr_names():
if (attr not in self._attr_names_with_defaults and
attr not in kwargs):
value = getattr(self, attr)
if copy:
value = value.copy()
kwargs[attr] = value
return self.__class__(data, copy=False, **kwargs)
def replicate(self, copy=False, **kwargs):
"""
Return a replica of the frame, optionally with new frame attributes.
The replica is a new frame object that has the same data as this frame
        object and with frame attributes overridden if they are provided as extra
keyword arguments to this method. If ``copy`` is set to `True` then a
copy of the internal arrays will be made. Otherwise the replica will
use a reference to the original arrays when possible to save memory. The
internal arrays are normally not changeable by the user so in most cases
it should not be necessary to set ``copy`` to `True`.
Parameters
----------
copy : bool, optional
If True, the resulting object is a copy of the data. When False,
references are used where possible. This rule also applies to the
frame attributes.
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : same as this frame
Replica of this object, but possibly with new frame attributes.
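
        Examples
        --------
        A minimal sketch of overriding a frame attribute (here FK5's
        ``equinox``; the available attributes differ per frame). Note that
        the data are not transformed; only the attribute is replaced:

        >>> from astropy.coordinates import FK5
        >>> import astropy.units as u
        >>> c = FK5(1*u.deg, 2*u.deg)
        >>> c2 = c.replicate(equinox='J2020.0')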
"""
return self._replicate(self.data, copy=copy, **kwargs)
def replicate_without_data(self, copy=False, **kwargs):
"""
Return a replica without data, optionally with new frame attributes.
The replica is a new frame object without data but with the same frame
        attributes as this object, except where overridden by extra keyword
arguments to this method. The ``copy`` keyword determines if the frame
attributes are truly copied vs being references (which saves memory for
cases where frame attributes are large).
This method is essentially the converse of `realize_frame`.
Parameters
----------
copy : bool, optional
If True, the resulting object has copies of the frame attributes.
When False, references are used where possible.
Any additional keywords are treated as frame attributes to be set on the
new frame object.
Returns
-------
frameobj : same as this frame
Replica of this object, but without data and possibly with new frame
attributes.
"""
return self._replicate(None, copy=copy, **kwargs)
def realize_frame(self, representation_type):
"""
Generates a new frame *with new data* from another frame (which may or
may not have data). Roughly speaking, the converse of
`replicate_without_data`.
Parameters
----------
representation_type : `BaseRepresentation`
The representation to use as the data for the new frame.
Returns
-------
frameobj : same as this frame
A new object with the same frame attributes as this one, but
with the ``representation`` as the data.
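
        Examples
        --------
        A minimal sketch (the representation values are arbitrary):

        >>> from astropy.coordinates import ICRS, UnitSphericalRepresentation
        >>> import astropy.units as u
        >>> rep = UnitSphericalRepresentation(lon=10*u.deg, lat=20*u.deg)
        >>> coord = ICRS().realize_frame(rep)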
"""
return self._replicate(representation_type)
def represent_as(self, base, s='base', in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation, or
use the ``set_representation_cls`` method to also set the differential.
Parameters
----------
base : subclass of BaseRepresentation or string
The type of representation to generate. Must be a *class*
(not an instance), or the string name of the representation
class.
s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
Class in which any velocities should be represented. Must be
a *class* (not an instance), or the string name of the
differential class. If equal to 'base' (default), inferred from
the base class. If `None`, all velocity information is dropped.
in_frame_units : bool, keyword only
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation) # doctest: +FLOAT_CMP
<CartesianRepresentation (x, y, z) [dimensionless]
(1., 0., 0.)>
>>> coord.representation = CartesianRepresentation
>>> coord # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1., 0., 0.)>
"""
        # For backwards compatibility (because in_frame_units used to be the
        # 2nd argument), we check to see if `s` is a boolean. If it is, we
        # treat it as the value of `in_frame_units` and warn about the
        # position change.
if isinstance(s, bool):
warnings.warn("The argument position for `in_frame_units` in "
"`represent_as` has changed. Use as a keyword "
"argument if needed.", AstropyWarning)
in_frame_units = s
s = 'base'
# In the future, we may want to support more differentials, in which
# case one probably needs to define **kwargs above and use it here.
# But for now, we only care about the velocity.
repr_classes = _get_repr_classes(base=base, s=s)
representation_cls = repr_classes['base']
# We only keep velocity information
if 's' in self.data.differentials:
differential_cls = repr_classes['s']
elif s is None or s == 'base':
differential_cls = None
else:
raise TypeError('Frame data has no associated differentials '
'(i.e. the frame has no velocity data) - '
'represent_as() only accepts a new '
'representation.')
if differential_cls:
cache_key = (representation_cls.__name__,
differential_cls.__name__, in_frame_units)
else:
cache_key = (representation_cls.__name__, in_frame_units)
cached_repr = self.cache['representation'].get(cache_key)
if not cached_repr:
if differential_cls:
# TODO NOTE: only supports a single differential
data = self.data.represent_as(representation_cls,
differential_cls)
diff = data.differentials['s'] # TODO: assumes velocity
else:
data = self.data.represent_as(representation_cls)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(representation_cls)
if new_attrs and in_frame_units:
datakwargs = dict((comp, getattr(data, comp))
for comp in data.components)
for comp, new_attr_unit in zip(data.components, new_attrs['units']):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(copy=False, **datakwargs)
if differential_cls:
# the original differential
data_diff = self.data.differentials['s']
# If the new differential is known to this frame and has a
# defined set of names and units, then use that.
new_attrs = self.representation_info.get(differential_cls)
if new_attrs and in_frame_units:
diffkwargs = dict((comp, getattr(diff, comp))
for comp in diff.components)
for comp, new_attr_unit in zip(diff.components,
new_attrs['units']):
# Some special-casing to treat a situation where the
# input data has a UnitSphericalDifferential or a
# RadialDifferential. It is re-represented to the
# frame's differential class (which might be, e.g., a
# dimensional Differential), so we don't want to try to
# convert the empty component units
if (isinstance(data_diff,
(r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential)) and
comp not in data_diff.__class__.attr_classes):
continue
elif (isinstance(data_diff, r.RadialDifferential) and
comp not in data_diff.__class__.attr_classes):
continue
if new_attr_unit and hasattr(diff, comp):
diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
diff = diff.__class__(copy=False, **diffkwargs)
# Here we have to bypass using with_differentials() because
# it has a validation check. But because
# .representation_type and .differential_type don't point to
# the original classes, if the input differential is a
# RadialDifferential, it usually gets turned into a
# SphericalCosLatDifferential (or whatever the default is)
# with strange units for the d_lon and d_lat attributes.
# This then causes the dictionary key check to fail (i.e.
# comparison against `diff._get_deriv_key()`)
data._differentials.update({'s': diff})
self.cache['representation'][cache_key] = data
return self.cache['representation'][cache_key]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : class or frame object or SkyCoord object
The frame to transform this coordinate frame into.
Returns
-------
        transframe
            A new object with the coordinate data represented in the
            ``new_frame`` system.
Raises
------
ValueError
If there is no possible transformation route.
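
        Examples
        --------
        A minimal sketch (any pair of frames connected in the transform
        graph would do):

        >>> from astropy.coordinates import ICRS, Galactic
        >>> import astropy.units as u
        >>> gal = ICRS(10*u.deg, 20*u.deg).transform_to(Galactic)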
"""
from .errors import ConvertError
if self._data is None:
raise ValueError('Cannot transform a frame with no data')
if (getattr(self.data, 'differentials', None) and
hasattr(self, 'obstime') and hasattr(new_frame, 'obstime') and
np.any(self.obstime != new_frame.obstime)):
raise NotImplementedError('You cannot transform a frame that has '
'velocities to another frame at a '
'different obstime. If you think this '
'should (or should not) be possible, '
'please comment at https://github.com/astropy/astropy/issues/6280')
if inspect.isclass(new_frame):
# Use the default frame attributes for this class
new_frame = new_frame()
if hasattr(new_frame, '_sky_coord_frame'):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__,
new_frame.__class__)
if trans is None:
if new_frame is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = 'Cannot transform from {0} to {1}'
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : class or frame object
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return 'same'
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : BaseCoordinateFrame
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `BaseCoordinateFrame` or subclass.
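
        Examples
        --------
        A minimal sketch using FK5, whose ``equinox`` is a frame attribute:

        >>> from astropy.coordinates import FK5
        >>> FK5(equinox='J2000').is_equivalent_frame(FK5(equinox='J2000'))
        True
        >>> FK5(equinox='J2000').is_equivalent_frame(FK5(equinox='J2015'))
        False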
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.get_frame_attr_names():
if np.any(getattr(self, frame_attr_name) !=
getattr(other, frame_attr_name)):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't a frame")
else:
return False
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = ' ({0})'.format(frameattrs)
if data_repr:
return '<{0} Coordinate{1}: {2}>'.format(self.__class__.__name__,
frameattrs, data_repr)
else:
return '<{0} Frame{1}>'.format(self.__class__.__name__,
frameattrs)
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ''
if self.representation:
if (hasattr(self.representation, '_unit_representation') and
isinstance(self.data, self.representation._unit_representation)):
rep_cls = self.data.__class__
else:
rep_cls = self.representation
if 's' in self.data.differentials:
dif_cls = self.get_representation_cls('s')
dif_data = self.data.differentials['s']
if isinstance(dif_data, (r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential)):
dif_cls = dif_data.__class__
else:
dif_cls = None
data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
data_repr = repr(data)
for nmpref, nmrepr in self.representation_component_names.items():
data_repr = data_repr.replace(nmrepr, nmpref)
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith('<' + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2):-1]
else:
data_repr = 'Data:\n' + data_repr
if 's' in self.data.differentials:
data_repr_spl = data_repr.split('\n')
if 'has differentials' in data_repr_spl[-1]:
diffrepr = repr(data.differentials['s']).split('\n')
if diffrepr[0].startswith('<'):
diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:])
for frm_nm, rep_nm in self.get_representation_component_names('s').items():
diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm)
if diffrepr[-1].endswith('>'):
diffrepr[-1] = diffrepr[-1][:-1]
data_repr_spl[-1] = '\n'.join(diffrepr)
data_repr = '\n'.join(data_repr_spl)
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
return ', '.join([attrnm + '=' + str(getattr(self, attrnm))
for attrnm in self.get_frame_attr_names()])
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
new = super().__new__(self.__class__)
if hasattr(self, '_representation'):
new._representation = self._representation.copy()
new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
for attr in self.frame_attributes:
_attr = '_' + attr
if attr in self._attr_names_with_defaults:
setattr(new, _attr, getattr(self, _attr))
else:
value = getattr(self, _attr)
if getattr(value, 'size', 1) > 1:
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, _attr, value)
if self.has_data:
new._data = apply_method(self.data)
else:
new._data = None
shapes = [getattr(new, '_' + attr).shape
for attr in new.frame_attributes
if (attr not in new._attr_names_with_defaults and
getattr(getattr(new, '_' + attr), 'size', 1) > 1)]
if shapes:
new._no_data_shape = (check_broadcast(*shapes)
if len(shapes) > 1 else shapes[0])
else:
new._no_data_shape = ()
return new
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
dir_values = set(self.representation_component_names)
dir_values |= set(self.get_representation_component_names('s'))
return dir_values
def __getattr__(self, attr):
"""
Allow access to attributes on the representation and differential as
found via ``self.get_representation_component_names``.
TODO: We should handle dynamic representation transforms here (e.g.,
`.cylindrical`) instead of defining properties as below.
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if attr.startswith('_'):
return self.__getattribute__(attr) # Raise AttributeError.
repr_names = self.representation_component_names
if attr in repr_names:
if self._data is None:
self.data # this raises the "no data" error by design - doing it
# this way means we don't have to replicate the error message here
rep = self.represent_as(self.representation_type,
in_frame_units=True)
val = getattr(rep, repr_names[attr])
return val
diff_names = self.get_representation_component_names('s')
if attr in diff_names:
if self._data is None:
self.data # see above.
# TODO: this doesn't work for the case when there is only
# unitspherical information. The differential_type gets set to the
# default_differential, which expects full information, so the
# units don't work out
rep = self.represent_as(in_frame_units=True,
**self.get_representation_cls(None))
val = getattr(rep.differentials['s'], diff_names[attr])
return val
return self.__getattribute__(attr) # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith('_'):
if hasattr(self, 'representation_info'):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr['names'])
if attr in repr_attr_names:
raise AttributeError(
'Cannot set any frame attribute {0}'.format(attr))
super().__setattr__(attr, value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
            unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
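
        Examples
        --------
        A minimal sketch (the coordinates are arbitrary):

        >>> from astropy.coordinates import ICRS
        >>> import astropy.units as u
        >>> c1 = ICRS(0*u.deg, 0*u.deg)
        >>> c2 = ICRS(1*u.deg, 0*u.deg)
        >>> sep = c1.separation(c2)  # an Angle of ~1 deg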
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(r.UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat,
other_unit_sph.lon, other_unit_sph.lat)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if issubclass(self.data.__class__, r.UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if issubclass(other_in_self_system.__class__, r.UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
# drop the differentials to ensure they don't do anything odd in the
# subtraction
self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation)
other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation)
return Distance((self_car - other_car).norm())
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('cartesian', in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`SphericalCosLatDifferential` for the velocity data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`CartesianDifferential` object. This is equivalent to calling
``self.cartesian.differentials['s']``.
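
        A minimal sketch of the intended usage (the numbers are arbitrary;
        distance, proper motion, and radial velocity all need to be present
        for a full 3D velocity):

        >>> from astropy.coordinates import ICRS
        >>> import astropy.units as u
        >>> c = ICRS(ra=1*u.deg, dec=2*u.deg, distance=10*u.pc,
        ...          pm_ra_cosdec=1*u.mas/u.yr, pm_dec=2*u.mas/u.yr,
        ...          radial_velocity=10*u.km/u.s)
        >>> v = c.velocity  # a CartesianDifferential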
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
try:
v = self.cartesian.differentials['s']
except Exception as e:
raise ValueError('Could not retrieve a Cartesian velocity. Your '
'frame must include velocity information for this '
                             'to work.') from e
return v
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', 'sphericalcoslat',
in_frame_units=True)
pm_lon = sph.differentials['s'].d_lon_coslat
pm_lat = sph.differentials['s'].d_lat
return np.stack((pm_lon.value,
pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if 's' not in self.data.differentials:
raise ValueError('Frame has no associated velocity (Differential) '
'data information.')
sph = self.represent_as('spherical', in_frame_units=True)
return sph.differentials['s'].d_distance
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
self.frame_attributes = OrderedDict()
for name, default in frame_attrs.items():
self.frame_attributes[name] = Attribute(default)
setattr(self, '_' + name, default)
super().__init__(None)
def __getattr__(self, name):
if '_' + name in self.__dict__:
return getattr(self, '_' + name)
else:
raise AttributeError('no {0}'.format(name))
def __setattr__(self, name, value):
if name in self.get_frame_attr_names():
raise AttributeError("can't set frame attribute '{0}'".format(name))
else:
super().__setattr__(name, value)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from warnings import warn
import collections
import socket
import json
import urllib.request
import urllib.error
import urllib.parse
import numpy as np
from .. import units as u
from .. import constants as consts
from ..units.quantity import QuantityInfoBase
from ..utils.exceptions import AstropyUserWarning
from .angles import Longitude, Latitude
from .representation import CartesianRepresentation, CartesianDifferential
from .errors import UnknownSiteException
from ..utils import data, deprecated
try:
# Not guaranteed available at setup time.
from .. import _erfa as erfa
except ImportError:
if not _ASTROPY_SETUP_:
raise
__all__ = ['EarthLocation']
GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height'])
# Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).
ELLIPSOIDS = ('WGS84', 'GRS80', 'WGS72')
OMEGA_EARTH = u.Quantity(7.292115855306589e-5, 1./u.s)
"""
Rotational velocity of Earth. In UT1 seconds, this would be 2 pi / (24 * 3600),
but we need the value in SI seconds.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992),
University Science Books.
"""
def _check_ellipsoid(ellipsoid=None, default='WGS84'):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError('Ellipsoid {0} not among known ones ({1})'
.format(ellipsoid, ELLIPSOIDS))
return ellipsoid
def _get_json_result(url, err_str):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode('utf8'))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out"))
else:
raise NameResolveError(err_str.format(msg=e.reason))
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
results = resp_data.get('results', [])
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
if resp_data.get('status', None) != 'OK':
raise NameResolveError(err_str.format(msg="unknown failure with Google maps API"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid')
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop('ellipsoid')
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
        # Very similar to QuantityInfo.new_like, but the creation of the
        # map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop('dtype')
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop('shape')
data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype),
unit=cols[0].unit, copy=False)
# Get arguments needed to reconstruct class
map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
    When using the geodetic forms, longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = 'WGS84'
_location_dtype = np.dtype({'names': ['x', 'y', 'z'],
'formats': [np.float64]*3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], EarthLocation)):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError('Coordinates could not be parsed as either '
'geocentric or geodetic, with respective '
'exceptions "{0}" and "{1}"'
.format(exc_geocentric, exc_geodetic))
return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : `~astropy.units.UnitBase` object or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
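
        Examples
        --------
        A minimal sketch (the values are arbitrary):

        >>> from astropy.coordinates import EarthLocation
        >>> import astropy.units as u
        >>> loc = EarthLocation.from_geocentric(6378.1*u.km, 0*u.km, 0*u.km)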
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError("Geocentric coordinates should be Quantities "
"unless an explicit unit is given.")
else:
unit = u.Unit(unit)
if unit.physical_type != 'length':
raise u.UnitsError("Geocentric coordinates should be in "
"units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be "
"consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc['x'], struc['y'], struc['z'] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
@classmethod
def from_geodetic(cls, lon, lat, height=0., ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
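
        Examples
        --------
        A minimal sketch (the values are illustrative only):

        >>> from astropy.coordinates import EarthLocation
        >>> import astropy.units as u
        >>> loc = EarthLocation.from_geodetic(lon=-70.4*u.deg,
        ...                                   lat=-24.6*u.deg,
        ...                                   height=2400.*u.m)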
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
lon = Longitude(lon, u.degree, wrap_angle=180*u.degree, copy=False)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates. Have to give one-dimensional array.
xyz = erfa.gd2gc(getattr(erfa, ellipsoid),
lon.to_value(u.radian),
lat.to_value(u.radian),
height.to_value(u.m))
self = xyz.ravel().view(cls._location_dtype,
cls).reshape(xyz.shape[:-1])
self._unit = u.meter
self._ellipsoid = ellipsoid
return self.to(height.unit)
@classmethod
def of_site(cls, site_name):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
.. note::
When this function is called, it will attempt to download site
information from the astropy data server. If you would like a site
to be added, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If a site cannot be found in the registry (i.e., an internet
        connection is not available), it will fall back on a built-in list.
In the future, this bundled list might include a version-controlled
list of canonical observatories extracted from the online version,
but it currently only contains the Greenwich Royal Observatory as an
example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
Returns
-------
site : This class (a `~astropy.coordinates.EarthLocation` or subclass)
The location of the observatory.
See Also
--------
get_site_names : the list of sites that this function can access
"""
registry = cls._get_site_registry()
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(e.site, 'EarthLocation.get_site_names', close_names=e.close_names)
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False):
"""
Return an object of this class for a given address by querying the Google
maps geocoding API.
This is intended as a quick convenience function to get fast access to
locations. In the background, this just issues a query to the Google maps
geocoding API. It is not meant to be abused! Google uses IP-based query
limiting and will ban your IP if you send more than a few thousand queries
per hour [1]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first** returned
location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API, this
can be a fully specified street address (e.g., 123 Main St., New York,
NY) or a city name (e.g., Danbury, CT), or etc.
get_height : bool (optional)
Use the retrieved location to perform a second query to the Google maps
elevation API to retrieve the height of the input address [2]_.
Returns
-------
location : This class (a `~astropy.coordinates.EarthLocation` or subclass)
The location of the input address.
References
----------
.. [1] https://developers.google.com/maps/documentation/geocoding/intro
.. [2] https://developers.google.com/maps/documentation/elevation/intro
"""
pars = urllib.parse.urlencode({'address': address})
geo_url = "https://maps.googleapis.com/maps/api/geocode/json?{0}".format(pars)
# get longitude and latitude location
err_str = ("Unable to retrieve coordinates for address '{address}'; {{msg}}"
.format(address=address))
geo_result = _get_json_result(geo_url, err_str=err_str)
loc = geo_result[0]['geometry']['location']
if get_height:
pars = {'locations': '{lat:.8f},{lng:.8f}'.format(lat=loc['lat'],
lng=loc['lng'])}
pars = urllib.parse.urlencode(pars)
ele_url = "https://maps.googleapis.com/maps/api/elevation/json?{0}".format(pars)
err_str = ("Unable to retrieve elevation for address '{address}'; {{msg}}"
.format(address=address))
ele_result = _get_json_result(ele_url, err_str=err_str)
height = ele_result[0]['elevation']*u.meter
else:
height = 0.
return cls.from_geodetic(lon=loc['lng']*u.degree,
lat=loc['lat']*u.degree,
height=height)
@classmethod
def get_site_names(cls):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
When this function is called, it will first attempt to
download site information from the astropy data server. If it
cannot (i.e., an internet connection is not available), it will fall
back on the list included with astropy (which is a limited and dated
set of sites). If you think a site should be added, issue a pull
request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry().names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
        Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
if force_builtin and force_download:
raise ValueError('Cannot have both force_builtin and force_download True')
if force_builtin:
reg = cls._site_registry = get_builtin_sites()
else:
reg = getattr(cls, '_site_registry', None)
if force_download or not reg:
try:
if isinstance(force_download, str):
reg = get_downloaded_sites(force_download)
else:
reg = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = ('Could not access the online site list. Falling '
'back on the built-in version, which is rather '
'limited. If you want to retry the download, do '
'{0}._get_site_registry(force_download=True)')
warn(AstropyUserWarning(msg.format(cls.__name__)))
reg = get_builtin_sites()
cls._site_registry = reg
return reg
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
(lon, lat, height) : tuple
The tuple contains instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`
Raises
------
ValueError
            If ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
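
        Examples
        --------
        A minimal sketch, round-tripping through geocentric coordinates
        (the values are arbitrary):

        >>> import astropy.units as u
        >>> loc = EarthLocation.from_geocentric(6378.1*u.km, 0*u.km, 0*u.km)
        >>> lon, lat, height = loc.to_geodetic()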
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
self_array = self.to(u.meter).view(self._array_dtype, np.ndarray)
lon, lat, height = erfa.gc2gd(getattr(erfa, ellipsoid), self_array)
return GeodeticLocation(
Longitude(lon * u.radian, u.degree,
wrap_angle=180.*u.degree, copy=False),
Latitude(lat * u.radian, u.degree, copy=False),
u.Quantity(height * u.meter, self.unit, copy=False))
@property
@deprecated('2.0', alternative='`lon`', obj_type='property')
def longitude(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
@deprecated('2.0', alternative='`lat`', obj_type='property')
def latitude(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def lat(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.size > 1:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
    itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object
                                     for the location of this object at the
                                     default ``obstime``.""")
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
        -------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
itrs = self.get_itrs(obstime)
# Assume the observatory itself is fixed on the ground.
# We do a direct assignment rather than an update to avoid validation
# and creation of a new object.
zeros = np.broadcast_to(0. * u.km / u.s, (3,) + itrs.shape, subok=True)
itrs.data.differentials['s'] = CartesianDifferential(zeros)
return itrs.transform_to(GCRS(obstime=obstime))
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
        -------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
"""
# GCRS position
gcrs_data = self.get_gcrs(obstime).data
obsgeopos = gcrs_data.without_differentials()
obsgeovel = gcrs_data.differentials['s'].to_cartesian()
return obsgeopos, obsgeovel
def gravitational_redshift(self, obstime,
bodies=['sun', 'jupiter', 'moon'],
masses={}):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict of str to Quantity, optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
        -------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
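
        Examples
        --------
        A sketch of the intended call pattern; real values require
        downloading site data and an ephemeris, so the doctest is skipped:

        >>> from astropy.time import Time
        >>> loc = EarthLocation.of_site('greenwich')  # doctest: +SKIP
        >>> loc.gravitational_redshift(Time('2017-01-01'))  # doctest: +SKIP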
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if 'earth' in bodies:
bodies.remove('earth')
bodies.append('earth')
_masses = {'sun': consts.GM_sun,
'jupiter': consts.GM_jup,
'moon': consts.G * 7.34767309e22*u.kg,
'earth': consts.GM_earth}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency]))
except KeyError as exc:
raise KeyError('body "{}" does not have a mass!'.format(body))
except u.UnitsError as exc:
exc.args += ('"masses" argument values must be masses or '
'gravitational parameters',)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [-GM / consts.c / distance for (GM, distance) in
zip(GMs, distances)]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self['x']
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self['y']
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self['z']
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, '_ellipsoid'):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError('0-d EarthLocation arrays cannot be indexed')
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
from collections import namedtuple
import numpy as np
from . import angle_utilities as util
from .. import units as u
from ..utils import isiterable
from ..utils.compat import NUMPY_LT_1_14_1, NUMPY_LT_1_14_2
__all__ = ['Angle', 'Latitude', 'Longitude']
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's'))
dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's'))
signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's'))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats::
Angle('10.2345d')
Angle(['10.2345d', '-20d'])
Angle('1:2:30.43 degrees')
Angle('1 2 0 hours')
Angle(np.arange(1, 8), unit=u.deg)
Angle('1°2′3″')
Angle('1d2m3.4s')
Angle('-1h2m3s')
Angle('-1h2.5m')
Angle('-1:2.5', unit=u.deg)
Angle((10, 11, 12), unit='hourangle') # (h, m, s)
Angle((-1, 2, 3), unit=u.deg) # (d, m, s)
Angle(10.2345 * u.deg)
Angle(Angle(10.2345 * u.deg))
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : `~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=None, copy=True):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = util.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, angle_unit)
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif (isiterable(angle) and
not (isinstance(angle, np.ndarray) and
angle.dtype.kind not in 'SUVO')):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return util.hms_to_hours(*angle)
elif unit == u.degree:
return util.dms_to_degrees(*angle)
else:
raise u.UnitsError("Can not parse '{0}' as unit '{1}'"
.format(angle, unit))
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit is u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*util.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*util.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(np.sign(self.degree),
*util.degrees_to_dms(np.abs(self.degree)))
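    # For example (a sketch; exact float formatting may differ)::
    #
    #     >>> Angle(-1.5, unit=u.deg).signed_dms
    #     signed_dms_tuple(sign=-1.0, d=1.0, m=30.0, s=0.0)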
def to_string(self, unit=None, decimal=False, sep='fromunit',
precision=None, alwayssign=False, pad=False,
fields=3, format=None):
""" A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `True`, a decimal representation will be used, otherwise
the returned string will be in sexagesimal form.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
            ``sep='-:'`` would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
None: {
u.degree: 'dms',
u.hourangle: 'hms'},
'latex': {
u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
u.hourangle: [r'^\mathrm{h}', r'^\mathrm{m}', r'^\mathrm{s}']},
'unicode': {
u.degree: '°′″',
u.hourangle: 'ʰᵐˢ'}
}
if sep == 'fromunit':
if format not in separators:
raise ValueError("Unknown format '{0}'".format(format))
seps = separators[format]
if unit in seps:
sep = seps[unit]
# Create an iterator so we can format each element of what
# might be an array.
if unit is u.degree:
if decimal:
values = self.degree
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'dms'
values = self.degree
func = lambda x: util.degrees_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit is u.hourangle:
if decimal:
values = self.hour
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'hms'
values = self.hour
func = lambda x: util.hours_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit.is_equivalent(u.radian):
if decimal:
values = self.to_value(unit)
if precision is not None:
func = ("{0:1." + str(precision) + "f}").format
else:
func = "{0:g}".format
elif sep == 'fromunit':
values = self.to_value(unit)
unit_string = unit.to_string(format=format)
if format == 'latex':
unit_string = unit_string[1:-1]
if precision is not None:
def plain_unit_format(val):
return ("{0:0." + str(precision) + "f}{1}").format(
val, unit_string)
func = plain_unit_format
else:
def plain_unit_format(val):
return "{0:g}{1}".format(val, unit_string)
func = plain_unit_format
else:
raise ValueError(
"'{0}' can not be represented in sexagesimal "
"notation".format(
unit.name))
else:
raise u.UnitsError(
"The unit value provided is not an angular unit.")
def do_format(val):
s = func(float(val))
if alwayssign and not s.startswith('-'):
s = '+' + s
if format == 'latex':
s = '${0}$'.format(s)
return s
format_ufunc = np.vectorize(do_format, otypes=['U'])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
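    # A short usage sketch for ``to_string`` (outputs assumed for
    # illustration; actual precision is value-dependent)::
    #
    #     >>> Angle(10.2345, unit=u.deg).to_string()
    #     '10d14m04.2s'
    #     >>> Angle(10.2345, unit=u.deg).to_string(sep=':', precision=2)
    #     '10:14:04.20'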
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `Angle` object is wrapped in
place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : str, `Angle`, angular `~astropy.units.Quantity`
Specifies a single value for the wrap angle. This can be any
object that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `Angle`
Returns
-------
out : Angle or `None`
If ``inplace is False`` (default), return new `Angle` object
with angles wrapped accordingly. Otherwise wrap in place and
return `None`.
"""
wrap_angle = Angle(wrap_angle) # Convert to an Angle
wrapped = np.mod(self - wrap_angle, 360.0 * u.deg) - (360.0 * u.deg - wrap_angle)
if inplace:
self[()] = wrapped
else:
return wrapped
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies lower bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies upper bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
if NUMPY_LT_1_14_1 or not NUMPY_LT_1_14_2:
def formatter(x):
return x.to_string(format=format)
else:
            # In numpy 1.14.1, array2string formatters get passed plain numpy scalars instead
# of subclass array scalars, so we need to recreate an array scalar.
def formatter(x):
return self._new_view(x).to_string(format=format)
return np.array2string(self, formatter={'all': formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format='latex')
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, Angle) else obj
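# For example (a sketch), arithmetic on a Latitude drops back to Angle via
# the ``__array_ufunc__`` overrides defined below::
#
#     >>> Latitude(45 * u.deg) / 2.
#     <Angle 22.5 deg>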
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
    angle : array, list, scalar, `~astropy.units.Quantity`, `Angle`
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
        If not given, the check is done on the object itself."""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
lower = u.degree.to(angles.unit, -90.0)
upper = u.degree.to(angles.unit, 90.0)
if np.any(angles.value < lower) or np.any(angles.value > upper):
raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
'got {0}'.format(angles.to(u.degree)))
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
    angle : array, list, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
        or ``(d, m, s)`` depending
on ``unit``. If a string, it will be interpreted following the
rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : :class:`~astropy.coordinates.Angle` or equivalent, or None
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError("A Longitude angle cannot be created from "
"a Latitude angle.")
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle)
self.wrap_angle = wrap_angle
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_internal()
def _wrap_internal(self):
"""
Wrap the internal values in the Longitude object. Using the
:meth:`~astropy.coordinates.Angle.wrap_at` method causes
recursion.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = self.wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.value
# Do the wrapping, but only if any angles need to be wrapped
if np.any(self_angle < wrap_angle_floor) or np.any(self_angle >= wrap_angle):
wrapped = np.mod(self_angle - wrap_angle, a360) + wrap_angle_floor
value = u.Quantity(wrapped, self.unit)
super().__setitem__((), value)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value)
self._wrap_internal()
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, '_wrap_angle',
self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
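# Illustrative wrapping behaviour (a sketch)::
#
#     >>> Longitude(370 * u.deg).degree      # default wrap_angle of 360 deg
#     10.0
#     >>> Longitude(190 * u.deg, wrap_angle=180 * u.deg).degree
#     -170.0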
|
27fe05aa3638d4828d9573f5f05c6586938e7f62a77a03fd3879f08a484daac8 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
import warnings
# Project
from .. import units as u
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils import OrderedDescriptor, ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute(OrderedDescriptor):
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
_class_attribute_ = 'frame_attributes'
_name_attribute_ = 'name'
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None)
if instance_shape is not None and (getattr(out, 'size', 1) > 1 and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {0} should be scalar or have shape {1}, "
"but is has shape {2} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
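# A minimal sketch of how a frame class would use these descriptors (the
# frame and default shown here are hypothetical)::
#
#     class MyFrame(BaseCoordinateFrame):
#         obstime = TimeAttribute(default=None)
#
#     MyFrame(obstime='J2000').obstime   # validated/converted to a Time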
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from ..time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
'Invalid time input {0}={1!r}\n{2}'.format(self.name,
value, err))
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {0} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None, shape=None):
super().__init__(default, secondary_attribute)
self.unit = unit
self.shape = shape
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if np.all(value == 0) and self.unit is not None:
return u.Quantity(np.zeros(self.shape), self.unit), True
else:
if not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled:
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
raise ValueError('The provided value has shape "{0}", but '
'should have shape "{1}"'.format(value.shape,
self.shape))
converted = oldvalue is not value
return value, converted
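# Usage sketch (hypothetical attribute; the unit and default are
# assumptions)::
#
#     obswl = QuantityAttribute(default=1 * u.micron, unit=u.micron)
#
# A plain ``0`` passed for such an attribute is accepted as shorthand for a
# zero Quantity with the declared unit and shape.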
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an EarthLocation, or something that can be
        transformed to the ITRS frame (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS)
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
low-level frame class *or* a `~astropy.coordinates.SkyCoord`, but will
always be converted to the low-level frame class when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is a SkyCoord or frame instance that can be
        transformed to this attribute's frame (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into a '
'CoordinateAttribute, but it does not have '
'"transform_to" method'.format(value))
transformedobj = value.transform_to(self._frame)
if hasattr(transformedobj, 'frame'):
transformedobj = transformedobj.frame
return transformedobj, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if not isinstance(value, self.allowed_classes):
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {0}. Allowed '
'classes are: {1}'
.format(value.__class__,
self.allowed_classes))
return value, True
# Backwards-compatibility: these are the only classes that were previously
# released in v1.3
class FrameAttribute(Attribute):
def __init__(self, *args, **kwargs):
warnings.warn("FrameAttribute has been renamed to Attribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class TimeFrameAttribute(TimeAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("TimeFrameAttribute has been renamed to TimeAttribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class QuantityFrameAttribute(QuantityAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("QuantityFrameAttribute has been renamed to "
"QuantityAttribute.", AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
class CartesianRepresentationFrameAttribute(CartesianRepresentationAttribute):
def __init__(self, *args, **kwargs):
warnings.warn("CartesianRepresentationFrameAttribute has been renamed "
"to CartesianRepresentationAttribute.",
AstropyDeprecationWarning)
super().__init__(*args, **kwargs)
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
|
e2030a5089f21f25e5561f8de252a9d07d9f2861e65d8e6aacaa9e7b2f4b1597 | """
Implements the wrapper for the Astropy test runner in the form of the
``./setup.py test`` distutils command.
"""
import os
import stat
import shutil
import subprocess
import sys
import tempfile
from distutils import log
from contextlib import contextmanager
from setuptools import Command
@contextmanager
def _suppress_stdout():
'''
A context manager to temporarily disable stdout.
    Used later when installing a temporary copy of astropy to avoid
    very verbose output.
'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
before distutils/setuptools try to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index('--remote-data')
except ValueError:
pass
else:
sys.argv[idx] = '--remote-data=any'
try:
idx = sys.argv.index('-R')
except ValueError:
pass
else:
sys.argv[idx] = '-R=any'
        super(FixRemoteDataOption, cls).__init__(name, bases, dct)
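# A sketch of the rewriting this metaclass performs on the command line::
#
#     python setup.py test --remote-data   # sys.argv entry -> '--remote-data=any'
#     python setup.py test -R              # sys.argv entry -> '-R=any'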
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"Accepts comma separated string to specify multiple packages. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a .py file, '
'it is relative to the built package, so e.g., a leading "astropy/" '
'is necessary. If a relative path to a .rst file, it is relative to '
'the directory *below* the --docs-path directory, so a leading '
'"docs/" is usually necessary. May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data=', 'R', 'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open. Requires the '
'psutil package.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If "auto", all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files."),
('repeat=', None,
'How many times to repeat each test (can be used to check for '
'sporadic failures).'),
('temp-root=', None,
'The root directory in which to create the temporary testing files. '
'If unspecified the system default is used (e.g. /tmp) as explained '
'in the documentation for tempfile.mkstemp.'),
('verbose-install', None,
'Turn on terminal output from the installation of astropy in a '
'temporary folder.'),
('readonly', None,
'Make the temporary installation being tested read-only.')
]
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = 'none'
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
self.verbose_install = False
self.readonly = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'add_local_eggs_to_path=True, ' # see _build_temp_install below
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
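    # The generated command is roughly the following one-liner (a trimmed,
    # illustrative sketch with default options)::
    #
    #     import builtins; builtins._ASTROPY_TEST_ = True; \
    #     import astropy, sys; \
    #     result = (astropy.test(package=None, test_path=None, ...)); \
    #     sys.exit(result)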
def run(self):
"""
Run the tests!
"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists('docs'): # pragma: no cover
self.docs_path = os.path.abspath('docs')
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug which occurs if psutil is installed this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
# tests_requires or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists('.eggs'):
shutil.copytree('.eggs', os.path.join(self.testing_path, '.eggs'))
# This option exists so that we can make sure that the tests don't
# write to an installed location.
if self.readonly:
log.info('changing permissions of temporary installation to read-only')
self._change_permissions_testing_path(writable=False)
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
# Construct this modules testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, '-c', cmd],
cwd=self.testing_path, close_fds=False)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
if self.readonly:
self._change_permissions_testing_path(writable=True)
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package into a temporary directory for the purposes of
        testing. This allows us to test the install command, include the
        entry points, and also avoid creating pyc and __pycache__ directories
        inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because py.test is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
dir=self.temp_root)
self.tmp_dir = os.path.realpath(tmp_dir)
log.info('installing to temporary directory: {0}'.format(self.tmp_dir))
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command('install')
install_cmd = self.distribution.get_command_obj('install')
install_cmd.prefix = self.tmp_dir
if self.verbose_install:
self.run_command('install')
else:
with _suppress_stdout():
self.run_command('install')
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command('install')
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(self.testing_path,
os.path.basename(self.docs_path))
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
shutil.copy('setup.cfg', self.testing_path)
def _change_permissions_testing_path(self, writable=False):
if writable:
basic_flags = stat.S_IRUSR | stat.S_IWUSR
else:
basic_flags = stat.S_IRUSR
for root, dirs, files in os.walk(self.testing_path):
for dirname in dirs:
os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR)
for filename in files:
os.chmod(os.path.join(root, filename), basic_flags)
def _generate_coverage_commands(self):
"""
This method creates the post and pre commands if coverage is to be
generated
"""
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage # pylint: disable=W0611
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path, self.package_name.replace('.', '/'),
'tests', 'coveragerc')
with open(coveragerc, 'r') as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace('.', '/'))
tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file=r"{0}", config_file=r"{1}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), os.path.abspath(tmp_coveragerc)))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, r"{0}", r"{1}");'.format(
os.path.abspath('.'), os.path.abspath(self.testing_path)))
return cmd_pre, cmd_post
|
8015a3c922db498f0791b2b1ce9eea59ccade3b01428c1c2b1213a638908f783 | """Implements the Astropy TestRunner which is a thin wrapper around py.test."""
import inspect
import os
import glob
import copy
import shlex
import sys
import tempfile
import warnings
import importlib
from collections import OrderedDict
from importlib.util import find_spec
from ..config.paths import set_temp_config, set_temp_cache
from ..utils import wraps, find_current_module
from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
class keyword:
"""
A decorator to mark a method as keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
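# A minimal sketch of defining a keyword method on a runner subclass (the
# runner name and option are hypothetical)::
#
#     class DemoRunner(TestRunnerBase):
#         @keyword(default_value=False)
#         def dry_run(self, dry_run, kwargs):
#             return ['--collect-only'] if dry_run else []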
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
`~astropy.tests.runner.keyword` decorator, these methods are used to
construct allowed keyword arguments to the
`~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
customization of individual keyword arguments (and associated logic)
without having to re-implement the whole
`~astropy.tests.runner.TestRunnerBase.run_tests` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
            @keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
        # Filter out anything that doesn't have the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += ' '*8
doc_keywords += func.__doc__.strip()
doc_keywords += '\n\n'
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super(TestRunnerBase, cls).__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError("{} keyword method must return a list".format(keyword))
args += result
return args
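    # E.g. (a sketch) ``runner._generate_args(verbose=True, pdb=True)`` walks
    # the keyword methods in priority order and could produce something like
    # ``['-v', '--pdb', '--remote-data=none', ...]``, depending on defaults.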
RUN_TESTS_DOCSTRING = \
"""
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
_required_dependancies = ['pytest', 'pytest_remotedata', 'pytest_doctestplus']
_missing_dependancy_error = "Test dependencies are missing. You should install the 'pytest-astropy' package."
@classmethod
def _has_test_dependencies(cls): # pragma: no cover
# Using the test runner will not work without these dependencies, but
# pytest-openfiles is optional, so it's not listed here.
for module in cls._required_dependancies:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
raise RuntimeError(cls._missing_dependancy_error)
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
        # sys.path when running the tests. This is done so that when
        # running python setup.py test, test dependencies installed via e.g.
        # tests_require are available here. This is not an advertised option
# since it is only for internal use
if kwargs.pop('add_local_eggs_to_path', False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join('.eggs', '*.egg')):
sys.path.insert(0, egg)
# We now need to force reload pkg_resources in case any pytest
# plugins were added above, so that their entry points are picked up
import pkg_resources
importlib.reload(pkg_resources)
self._has_test_dependencies() # pragma: no cover
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
args = self._generate_args(**kwargs)
if kwargs.get('plugins', None) is not None:
plugins = kwargs.pop('plugins')
elif self.keywords.get('plugins', None) is not None:
plugins = self.keywords['plugins']
else:
plugins = []
# If we are running the astropy tests with the astropy plugins handle
# the config stuff, otherwise ignore it.
if 'astropy.tests.plugins.config' not in plugins:
return pytest.main(args=args, plugins=plugins)
# override the config locations to not make a new directory nor use
# existing cache or config
astropy_config = tempfile.mkdtemp('astropy_config')
astropy_cache = tempfile.mkdtemp('astropy_cache')
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to py.test are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=plugins)
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
`TestRunner.run_tests`.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
"""
runner = cls(path)
@wraps(runner.run_tests, ('__doc__',), exclude_args=('self',))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
# A somewhat unusual hack, but delete the attached __wrapped__
# attribute--although this is normally used to tell if the function
# was wrapped with wraps, on some version of Python this is also
# used to determine the signature to display in help() which is
# not useful in this case. We don't really care in this case if the
# function was wrapped either
if hasattr(test, '__wrapped__'):
del test.__wrapped__
return test
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests
"""
def packages_path(self, packages, base_path, error=None, warning=None):
"""
Generates the path for multiple packages.
Parameters
----------
packages : str
Comma separated string of packages.
base_path : str
Base path to the source code or documentation.
error : str
Error message to be raised as ``ValueError``. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No error is raised if `None`. (Default: `None`)
warning : str
Warning message to be issued. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
            respectively. No warning is issued if `None`. (Default: `None`)
Returns
-------
paths : list of str
            List of strings of existing package paths.
"""
packages = packages.split(",")
paths = []
for package in packages:
path = os.path.join(
base_path, package.replace('.', os.path.sep))
if not os.path.isdir(path):
info = {'name': package, 'path': path}
if error is not None:
raise ValueError(error.format(**info))
if warning is not None:
warnings.warn(warning.format(**info))
else:
paths.append(path)
return paths
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
"""
if package is None:
self.package_path = [self.base_path]
else:
error_message = ('package to test is not found: {name} '
'(at path {path}).')
self.package_path = self.packages_path(package, self.base_path,
error=error_message)
if not kwargs['test_path']:
return self.package_path
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs['package'], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in ('.rst', ''):
if kwargs['docs_path'] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path "
"specified.")
abs_docs_path = os.path.abspath(kwargs['docs_path'])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path))
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append('--doctest-rst')
test_path = abs_test_path
# Check that the extensions are in the path and not at the end to
# support specifying the name of the test, i.e.
# test_quantity.py::test_unit
if not (os.path.isdir(test_path) or ('.py' in test_path or '.rst' in test_path)):
raise ValueError("Test path must be a directory or a path to "
"a .py or .rst file")
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith('win'))
return []
@keyword(default_value=['astropy.tests.plugins.display', 'astropy.tests.plugins.config'])
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
# Plugins are handled independently by `run_tests` so we define this
# keyword just for the docstring
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ['-v']
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ['failed', 'all']:
return ['--pastebin={0}'.format(pastebin)]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value='none')
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
"""
if remote_data is True:
remote_data = 'any'
elif remote_data is False:
remote_data = 'none'
elif remote_data not in ('none', 'astropy', 'any'):
warnings.warn("The remote_data option should be one of "
"none/astropy/any (found {0}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.".format(remote_data),
AstropyDeprecationWarning)
remote_data = 'any'
return ['--remote-data={0}'.format(remote_data)]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # pylint: disable=W0611
except ImportError:
raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
'http://pypi.python.org/pypi/pytest-pep8')
else:
return ['--pep8', '-k', 'pep8']
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ['--pdb']
return []
@keyword()
def open_files(self, open_files, kwargs):
"""
open_files : bool, optional
Fail when any tests leave files open. Off by default, because
this adds extra run time to the test suite. Requires the
``psutil`` package.
"""
if open_files:
if kwargs['parallel'] != 0:
raise SystemError(
"open file detection may not be used in conjunction with "
"parallel testing.")
try:
import psutil # pylint: disable=W0611
except ImportError:
raise SystemError(
"open file detection requested, but psutil package "
"is not installed.")
            print("Checking for unclosed files")
            return ['--open-files']
return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is ``'auto'``, it will use all the
            cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package")
return ['-n', str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
paths = []
if docs_path is not None and not kwargs['skip_docs']:
if kwargs['package'] is not None:
warning_message = ("Can not test .rst docs for {name}, since "
"docs path ({path}) does not exist.")
paths = self.packages_path(kwargs['package'], docs_path,
warning=warning_message)
if len(paths) and not kwargs['test_path']:
paths.append('--doctest-rst')
return paths
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return ['--repeat={0}'.format(repeat)]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from ..table import Table # pylint: disable=W0611
return super(TestRunner, self).run_tests(**kwargs)
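# Usage sketch (illustrative only): each ``@keyword`` method above converts
# one keyword argument of ``run_tests`` into pytest command-line options, so
# a call such as
#
#     import astropy
#     astropy.test(package='stats', verbose=True, remote_data='none')
#
# expands to roughly ``pytest -v --remote-data=none <path to astropy/stats>``.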
|
f0f1086d9c657b3a20425c1015bfcc4004a5c6be624d679a6b5cae7d5e51641f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest` testing framework.
"""
import os
import sys
import types
import pickle
import warnings
import functools
import pytest
try:
# Import pkg_resources to prevent it from issuing warnings upon being
# imported from within py.test. See
# https://github.com/astropy/astropy/pull/537 for a detailed explanation.
import pkg_resources # pylint: disable=W0611
except ImportError:
pass
from ..units import allclose as quantity_allclose # noqa
from ..utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611
__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data',
'treat_deprecations_as_exceptions', 'catch_warnings',
'assert_follows_unicode_guidelines',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
# pytest marker to mark tests which get data from the web
# This is being maintained for backwards compatibility
remote_data = pytest.mark.remote_data
# distutils expects options to be Unicode strings
def _fix_user_options(options):
def to_str_or_none(x):
if x is None:
return None
return str(x)
return [tuple(to_str_or_none(x) for x in y) for y in options]
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
to cleanup and then save the coverage data and report.
"""
from ..utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testing_path))
new_path = os.path.abspath(
os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print('Saving coverage data in .coverage...', 'green')
cov.save()
color_print('Saving HTML coverage report in htmlcov...', 'green')
cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
class raises:
"""
A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
    This can also be used as a context manager, in which case it is just
    an alias for the ``pytest.raises`` context manager (because the
    two have the same name, this helps avoid confusion by being
    flexible).
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = set([
'compiler', # A deprecated stdlib module used by py.test
'scipy',
'pygments',
'ipykernel',
'IPython', # deprecation warnings for async and await
'setuptools'])
_warnings_to_ignore_entire_module = set([])
_warnings_to_ignore_by_pyver = {
(3, 5): set([
# py.test reads files with the 'U' flag, which is
# deprecated.
r"'U' mode is deprecated",
# py.test raised this warning in inspect on Python 3.5.
# See https://github.com/pytest-dev/pytest/pull/1009
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) instead",
# https://github.com/astropy/astropy/pull/7372
r"Importing from numpy\.testing\.decorators is deprecated, import from numpy\.testing instead\.",
# Deprecation warnings ahead of pytest 4.x
r"MarkInfo objects are deprecated"]),
(3, 6): set([
# py.test reads files with the 'U' flag, which is
# deprecated.
r"'U' mode is deprecated",
# inspect raises this slightly different warning on Python 3.6-3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
# https://github.com/astropy/astropy/pull/7372
r"Importing from numpy\.testing\.decorators is deprecated, import from numpy\.testing instead\.",
# Deprecation warnings ahead of pytest 4.x
r"MarkInfo objects are deprecated"]),
(3, 7): set([
# py.test reads files with the 'U' flag, which is
# deprecated.
r"'U' mode is deprecated",
# inspect raises this slightly different warning on Python 3.6-3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
# https://github.com/astropy/astropy/pull/7372
r"Importing from numpy\.testing\.decorators is deprecated, import from numpy\.testing instead\.",
# Deprecation warnings ahead of pytest 4.x
r"MarkInfo objects are deprecated",
# Deprecation warning for collections.abc, fixed in Astropy but still
# used in lxml, and maybe others
r"Using or importing the ABCs from 'collections'",
]),
}
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={}):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
on import, which are to be ignored. By default, these are already
included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and
``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
        ``AstropyPendingDeprecationWarning`` are also ignored for these modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
        a list of deprecation warning messages to ignore. This is in
        addition to those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in warnings_to_ignore_by_pyver.items():
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
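# Usage sketch for an affiliated package's ``conftest.py`` (module and
# message names below are hypothetical):
#
#     from astropy.tests.helper import enable_deprecations_as_exceptions
#     enable_deprecations_as_exceptions(
#         modules_to_ignore_on_import=['mypkg._vendored'],
#         warnings_to_ignore_by_pyver={(3, 7): [r'some third-party message']})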
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
# We don't want to deal with six.MovedModules, only "real"
# modules.
if (isinstance(module, types.ModuleType) and
hasattr(module, '__warningregistry__')):
del module.__warningregistry__
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (py.test and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn DeprecationWarnings into exceptions
_all_warns = [DeprecationWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning,
AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all deprecation warnings from given module(s),
    # not just on import, for use by Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings('ignore', category=w, module=m)
for v in _warnings_to_ignore_by_pyver:
if sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s, DeprecationWarning)
class catch_warnings(warnings.catch_warnings):
"""
A high-powered version of warnings.catch_warnings to use for testing
and to make sure that there is no dependence on the order in which
the tests are run.
This completely blitzes any memory of any warnings that have
appeared before so that all warnings will be caught and displayed.
``*args`` is a set of warning classes to collect. If no arguments are
provided, all warnings are collected.
Use as follows::
with catch_warnings(MyCustomWarning) as w:
do.something.bad()
assert len(w) > 0
"""
def __init__(self, *classes):
super(catch_warnings, self).__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super(catch_warnings, self).__enter__()
treat_deprecations_as_exceptions()
if len(self.classes) == 0:
warnings.simplefilter('always')
else:
warnings.simplefilter('ignore')
for cls in self.classes:
warnings.simplefilter('always', cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions()
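# Minimal sketch of ``catch_warnings`` in a test (standard library only):
#
#     with catch_warnings(UserWarning) as w:
#         warnings.warn("something suspicious", UserWarning)
#     assert len(w) == 1
#     assert issubclass(w[0].category, UserWarning)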
class ignore_warnings(catch_warnings):
"""
This can be used either as a context manager or function decorator to
ignore all warnings that occur within a function or block of code.
An optional category option can be supplied to only ignore warnings of a
certain category or categories (if a list is provided).
"""
def __init__(self, category=None):
super(ignore_warnings, self).__init__()
if isinstance(category, type) and issubclass(category, Warning):
self.category = [category]
else:
self.category = category
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Originally this just reused self, but that doesn't work if the
# function is called more than once so we need to make a new
# context manager instance for each call
with self.__class__(category=self.category):
return func(*args, **kwargs)
return wrapper
def __enter__(self):
retval = super(ignore_warnings, self).__enter__()
if self.category is not None:
for category in self.category:
warnings.simplefilter('ignore', category)
else:
warnings.simplefilter('ignore')
return retval
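# Usage sketch, as a decorator and as a context manager (``legacy_call`` is
# a hypothetical helper that emits a DeprecationWarning):
#
#     @ignore_warnings(DeprecationWarning)
#     def test_legacy_api():
#         legacy_call()
#
#     with ignore_warnings():
#         warnings.warn("silenced entirely")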
def assert_follows_unicode_guidelines(
x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
``repr(x)`` and ensure that it roundtrips. It will also
        ensure that ``__bytes__(x)`` roundtrips.
If not provided, no roundtrip testing will be performed.
"""
from .. import conf
with conf.set_temp('unicode_output', False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
unicode_x.encode('ascii')
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp('unicode_output', True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
    Fixture to run all the tests for pickle protocols 0, 1, and -1 (the
    most advanced).
(Originally from astropy.table.tests.test_pickle)
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
dict_a = a.__dict__
dict_b = b.__dict__
for key in dict_a:
assert key in dict_b,\
"Did not pickle {0}".format(key)
if hasattr(dict_a[key], '__eq__'):
eq = (dict_a[key] == dict_b[key])
if '__iter__' in dir(eq):
eq = (False not in eq)
assert eq, "Value of {0} changed by pickling".format(key)
if hasattr(dict_a[key], '__dict__'):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(dict_a[key],
dict_b[key],
new_class_history)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled,
class_history)
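# Usage sketch (``SomeContainer`` is a hypothetical class under test); the
# ``pickle_protocol`` fixture above parametrizes the test over protocols
# 0, 1, and -1:
#
#     def test_pickle_roundtrip(pickle_protocol):
#         check_pickling_recovery(SomeContainer(), pickle_protocol)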
def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
"""
import numpy as np
from ..units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(*_unquantify_allclose_arguments(
actual, desired, rtol, atol), **kwargs)
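# Minimal usage sketch of ``assert_quantity_allclose`` (assumes astropy.units
# is importable): quantities that agree after unit conversion pass.
#
#     from astropy import units as u
#     assert_quantity_allclose(1000.0 * u.m, 1.0 * u.km)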
|
8b33dbfd4374575efa9cc39967454cbdceefb152c2f2a4d7ec34e2f21868e3dc | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["bls_fast", "bls_slow"]
import numpy as np
from functools import partial
from ._impl import bls_impl
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using a brute force reference method
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
f = partial(_bls_slow_one, t, y, ivar, duration,
oversample, use_likelihood)
return _apply(f, period)
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using an optimized Cython implementation
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
return bls_impl(
t, y, ivar, period, duration, oversample, use_likelihood
)
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
"""A private function to compute the brute force periodogram result"""
best = (-np.inf, None)
hp = 0.5*period
for dur in duration:
# Compute the phase grid (this is set by the duration and oversample).
d_phase = dur / oversample
phase = np.arange(0, period+d_phase, d_phase)
for t0 in phase:
# Figure out which data points are in and out of transit.
m_in = np.abs((t-t0+hp) % period - hp) < 0.5*dur
m_out = ~m_in
# Compute the estimates of the in and out-of-transit flux.
ivar_in = np.sum(ivar[m_in])
ivar_out = np.sum(ivar[m_out])
y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out
# Use this to compute the best fit depth and uncertainty.
depth = y_out - y_in
depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
snr = depth / depth_err
# Compute the log likelihood of this model.
loglike = -0.5*np.sum((y_in - y[m_in])**2 * ivar[m_in])
loglike += 0.5*np.sum((y_out - y[m_in])**2 * ivar[m_in])
# Choose which objective should be used for the optimization.
if use_likelihood:
objective = loglike
else:
objective = snr
# If this model is better than any before, keep it.
if depth > 0 and objective > best[0]:
best = (
objective,
(objective, depth, depth_err, dur, t0, snr, loglike)
)
return best[1]
def _apply(f, period):
return tuple(map(np.array, zip(*map(f, period))))
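# Sketch of the ``_apply`` helper above: ``f`` is mapped over the trial
# periods and the per-period result tuples are transposed into one array per
# output quantity, e.g.:
#
#     _apply(lambda p: (p, 2.0 * p), np.array([1.0, 2.0]))
#     # -> (array([1., 2.]), array([2., 4.]))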
|
786a42d2f769c2323559dec03dbcfbeb525e216476ad4a8a9ed116c8fdc3c0c3 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from ...tests.helper import assert_quantity_allclose
from ... import units
from ..lombscargle.core import has_units, strip_units
from . import methods
def validate_unit_consistency(reference_object, input_object):
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
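# Behavior sketch for ``validate_unit_consistency`` (assumes
# ``from astropy import units as u``): when the reference carries units the
# input is converted to them; otherwise the input must be dimensionless and
# the bare value is returned.
#
#     validate_unit_consistency(1.0 * u.day, 24.0 * u.hour)  # -> <Quantity 1. d>
#     validate_unit_consistency(np.ones(3), 2.0)             # -> 2.0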
class BoxLeastSquares(object):
"""Compute the box least squares periodogram
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like or Quantity
Sequence of observation times.
y : array-like or Quantity
Sequence of observations associated with times ``t``.
dy : float, array-like or Quantity, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.RandomState(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
>>> y[np.abs((t + 1.0)%2.0-1)<0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.randn(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
2.005441310651872
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01479464, 0.03804835, 0.09640946, 0.05199547, 0.01970484])
    If the inputs are Astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
self.t, self.y, self.dy = self._validate_inputs(t, y, dy)
def autoperiod(self, duration,
minimum_period=None, maximum_period=None,
minimum_n_transit=3, frequency_factor=1.0):
"""Determine a suitable grid of periods
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
        some users' needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like or Quantity
The set of durations that will be considered.
minimum_period, maximum_period : float or Quantity, optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
        minimum_n_transit : int, optional
            If ``maximum_period`` is not provided, this is used to compute the
            maximum period to search by asserting that any systems with at
            least ``minimum_n_transit`` transits will be within the range of
            searched periods. Note that this is not the same as requiring that
            ``minimum_n_transit`` transits be detected. The default value is
            ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or Quantity
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
            maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)
        ensuring that any systems with at least ``minimum_n_transit`` transits
        are within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
"""
duration = self._validate_duration(duration)
baseline = strip_units(self.t.max() - self.t.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self.t, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit-1)
else:
maximum_period = validate_unit_consistency(self.t, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0/strip_units(maximum_period)
maximum_frequency = 1.0/strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency)/df))
return 1.0/(maximum_frequency-df*np.arange(nf)) * self._t_unit()
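    # Usage sketch (values are illustrative): with observations spanning a
    # few days, build the heuristic period grid for 0.16-day transits.
    #
    #     model = BoxLeastSquares(t, y)
    #     periods = model.autoperiod(0.16, minimum_period=0.5,
    #                                maximum_period=4.0)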
def autopower(self, duration, objective=None, method=None, oversample=10,
minimum_n_transit=3, minimum_period=None,
maximum_period=None, frequency_factor=1.0):
"""Compute the periodogram at set of heuristically determined periods
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor)
return self.power(period, duration, objective=objective, method=method,
oversample=oversample)
def power(self, period, duration, objective=None, method=None,
oversample=10):
"""Compute the periodogram for a set of periods
Parameters
----------
period : array-like or Quantity
The periods where the power should be computed
duration : float, array-like or Quantity
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
            time resolution of the phase fit, with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError("oversample must be an int, got {0}"
.format(oversample))
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(objective, allowed_objectives))
use_likelihood = (objective == "likelihood")
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(method, allowed_methods))
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self.t), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period),
dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration),
dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t, y - np.median(y), ivar, period_fmt, duration,
oversample, use_likelihood)
return self._format_results(objective, period, results)
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase
Parameters
----------
t_model : array-like or Quantity
Times at which to compute the model.
period : float or Quantity
The period of the transits.
duration : float or Quantity
The duration of the transit.
transit_time : float or Quantity
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or Quantity
The model evaluated at the times ``t_model`` with units of ``y``.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = validate_unit_consistency(self.t, transit_time)
t_model = strip_units(validate_unit_consistency(self.t, t_model))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self.t), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Compute the depth
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model-transit_time+hp) % period-hp) < 0.5*duration
y_model[m_model] = y_in
return y_model * self._y_unit()
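    # Usage sketch: evaluate the best-fit box model on a fine time grid,
    # using the peak of a previously computed periodogram (``results`` is the
    # hypothetical output of ``power`` or ``autopower``).
    #
    #     i = np.argmax(results.power)
    #     t_grid = np.linspace(t.min(), t.max(), 1000)
    #     y_fit = model.model(t_grid, results.period[i], results.duration[i],
    #                         results.transit_time[i])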
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or Quantity
The period of the transits.
duration : float or Quantity
The duration of the transit.
transit_time : float or Quantity
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = validate_unit_consistency(self.t, transit_time)
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self.t), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
        # This is a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
m_odd = np.abs((t-transit_time) % (2*period) - period) \
< 0.5*duration
m_even = np.abs((t-transit_time+period) % (2*period) - period) \
< 0.5*duration
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t-transit_time) % period - hp) < 0.5*duration
depth_phase = _compute_depth(m_phase,
*_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = np.abs((t-transit_time+0.25*period) % (0.5*period)
- 0.25*period) < 0.5*duration
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in]-transit_time) / period).astype(int)
transit_times = period * np.arange(transit_id.min(),
transit_id.max()+1) + transit_time
unique_ids, unique_counts = np.unique(transit_id,
return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in)**2 - (y[m_in] - y_out)**2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5*np.sum(ivar[m_in] * (y[m_in] - y_in)**2)
full_ll -= 0.5*np.sum(ivar[m_out] * (y[m_out] - y_out)**2)
# Compute the log likelihood of a sine model
A = np.vstack((
np.sin(2*np.pi*t/period), np.cos(2*np.pi*t/period),
np.ones_like(t)
)).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]),
np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5*np.sum((y-mod)**2*ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=transit_times * self._t_unit(),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2]**2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
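    # Usage sketch: vet a candidate by comparing odd and even transit depths;
    # a significant difference suggests an eclipsing binary at twice the
    # reported period (``period``, ``duration``, and ``transit_time`` are
    # hypothetical best-fit values from a periodogram search).
    #
    #     stats = model.compute_stats(period, duration, transit_time)
    #     depth_odd, depth_odd_err = stats['depth_odd']
    #     depth_even, depth_even_err = stats['depth_even']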
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set
Parameters
----------
        t : array-like or Quantity
            Times where the mask should be evaluated.
period : float or Quantity
The period of the transits.
duration : float or Quantity
The duration of the transit.
transit_time : float or Quantity
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = validate_unit_consistency(self.t, transit_time)
t = strip_units(validate_unit_consistency(self.t, t))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
hp = 0.5*period
return np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
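    # Usage sketch: drop in-transit points before fitting an out-of-transit
    # baseline (same hypothetical best-fit parameters as above).
    #
    #     in_transit = model.transit_mask(t, period, duration, transit_time)
    #     t_out, y_out = t[~in_transit], y[~in_transit]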
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs
Parameters
----------
t : array-like or Quantity
Sequence of observation times.
y : array-like or Quantity
Sequence of observations associated with times t.
dy : float, array-like or Quantity
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like or Quantity
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations
Parameters
----------
duration : float, array-like or Quantity
The set of durations that will be considered.
Returns
-------
duration : array-like or Quantity
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self.t, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations
Parameters
----------
period : float, array-like or Quantity
The set of test periods.
duration : float, array-like or Quantity
The set of durations that will be considered.
Returns
-------
period, duration : array-like or Quantity
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self.t, period)
if not np.min(period) > np.max(duration):
raise ValueError("The maximum transit duration must be shorter "
"than the minimum period")
return period, duration
def _format_results(self, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
objective : string
The name of the objective used in the optimization.
period : array-like or Quantity
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
        (power, depth, depth_err, duration, transit_time, depth_snr,
         log_likelihood) = results
if has_units(self.t):
transit_time = units.Quantity(transit_time, unit=self.t.unit)
duration = units.Quantity(duration, unit=self.t.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood,
unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
            objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
def _t_unit(self):
if has_units(self.t):
return self.t.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search
Attributes
----------
objective : string
The scalar used to optimize to find the best fit phase, duration, and
depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or Quantity
The set of test periods.
power : array-like or Quantity
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or Quantity
The estimated depth of the maximum power model at each period.
depth_err : array-like or Quantity
The 1-sigma uncertainty on ``depth``.
duration : array-like or Quantity
The maximum power duration at each period.
transit_time : array-like or Quantity
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or Quantity
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or Quantity
The log likelihood of the maximum power model.
"""
def __init__(self, *args):
super(BoxLeastSquaresResults, self).__init__(zip(
("objective", "period", "power", "depth", "depth_err",
"duration", "transit_time", "depth_snr", "log_likelihood"),
args
))
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
def assert_allclose(self, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
This method loops over all attributes and compares the values using
:func:`~astropy.tests.helper.assert_quantity_allclose` function.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in self.items():
if k not in other:
raise AssertionError("missing key '{0}'".format(k))
if k == "objective":
assert v == other[k], (
"Mismatched objectives. Expected '{0}', got '{1}'"
.format(v, other[k])
)
continue
assert_quantity_allclose(v, other[k], **kwargs)
|
6cb62e0b022aa40041c9bc2318e74df61359c5807cb8e7f536f32d8a7c1308c7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
BLS_ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
ext = Extension(
"astropy.stats.bls._impl",
sources=[
join(BLS_ROOT, "bls.c"),
join(BLS_ROOT, "_impl.pyx"),
],
include_dirs=["numpy"],
)
return [ext]
|
b314a2f2224a9137ae50b49187b31c3000b130f0ec77717a4d2815e75bb898b3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .. import histogram, scott_bin_width, freedman_bin_width, knuth_bin_width
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_scott_bin_width(N=10000, rseed=0):
rng = np.random.RandomState(rseed)
X = rng.randn(N)
delta = scott_bin_width(X)
assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3))
delta, bins = scott_bin_width(X, return_bins=True)
assert_allclose(delta, 3.5 * np.std(X) / N ** (1 / 3))
with pytest.raises(ValueError):
scott_bin_width(rng.rand(2, 10))
def test_freedman_bin_width(N=10000, rseed=0):
rng = np.random.RandomState(rseed)
X = rng.randn(N)
v25, v75 = np.percentile(X, [25, 75])
delta = freedman_bin_width(X)
assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3))
delta, bins = freedman_bin_width(X, return_bins=True)
assert_allclose(delta, 2 * (v75 - v25) / N ** (1 / 3))
with pytest.raises(ValueError):
freedman_bin_width(rng.rand(2, 10))
# data with too small IQR
test_x = [1, 2, 3] + [4] * 100 + [5, 6, 7]
with pytest.raises(ValueError) as e:
freedman_bin_width(test_x, return_bins=True)
assert 'Please use another bin method' in str(e)
# data with small IQR but not too small
test_x = np.asarray([1, 2, 3] * 100 + [4] + [5, 6, 7], dtype=np.float32)
test_x *= 1.5e-6
delta, bins = freedman_bin_width(test_x, return_bins=True)
assert_allclose(delta, 8.923325554510689e-07)
@pytest.mark.skipif('not HAS_SCIPY')
def test_knuth_bin_width(N=10000, rseed=0):
rng = np.random.RandomState(rseed)
X = rng.randn(N)
dx, bins = knuth_bin_width(X, return_bins=True)
assert_allclose(len(bins), 59)
dx2 = knuth_bin_width(X)
assert dx == dx2
with pytest.raises(ValueError):
knuth_bin_width(rng.rand(2, 10))
@pytest.mark.skipif('not HAS_SCIPY')
def test_knuth_histogram(N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
counts, bins = histogram(x, 'knuth')
assert (counts.sum() == len(x))
assert (len(counts) == len(bins) - 1)
def test_histogram(N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
for bins in [30, np.linspace(-5, 5, 31),
'scott', 'freedman', 'blocks']:
counts, bins = histogram(x, bins)
assert (counts.sum() == len(x))
assert (len(counts) == len(bins) - 1)
def test_histogram_range(N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
range = (0.1, 0.8)
for bins in ['scott', 'freedman', 'blocks']:
counts, bins = histogram(x, bins, range=range)
@pytest.mark.skipif('not HAS_SCIPY')
def test_histogram_output_knuth():
rng = np.random.RandomState(0)
X = rng.randn(100)
counts, bins = histogram(X, bins='knuth')
assert_allclose(counts, [1, 6, 9, 14, 21, 22, 12, 8, 7])
assert_allclose(bins, [-2.55298982, -2.01712932, -1.48126883, -0.94540834,
-0.40954784, 0.12631265, 0.66217314, 1.19803364,
1.73389413, 2.26975462])
def test_histogram_output():
rng = np.random.RandomState(0)
X = rng.randn(100)
counts, bins = histogram(X, bins=10)
assert_allclose(counts, [1, 5, 7, 13, 17, 18, 16, 11, 7, 5])
assert_allclose(bins, [-2.55298982, -2.07071537, -1.58844093, -1.10616648,
-0.62389204, -0.1416176, 0.34065685, 0.82293129,
1.30520574, 1.78748018, 2.26975462])
counts, bins = histogram(X, bins='scott')
assert_allclose(counts, [2, 13, 23, 34, 16, 10, 2])
assert_allclose(bins, [-2.55298982, -1.79299405, -1.03299829, -0.27300252,
0.48699324, 1.24698901, 2.00698477, 2.76698054])
counts, bins = histogram(X, bins='freedman')
assert_allclose(counts, [2, 7, 13, 20, 26, 14, 11, 5, 2])
assert_allclose(bins, [-2.55298982, -1.95796338, -1.36293694, -0.7679105,
-0.17288406, 0.42214237, 1.01716881, 1.61219525,
2.20722169, 2.80224813])
counts, bins = histogram(X, bins='blocks')
assert_allclose(counts, [10, 61, 29])
assert_allclose(bins, [-2.55298982, -1.24381059, 0.46422235, 2.26975462])
def test_histogram_badargs(N=1000, rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(N)
# weights is not supported
for bins in ['scott', 'freedman', 'blocks']:
with pytest.raises(NotImplementedError):
histogram(x, bins, weights=x)
# bad bins arg gives ValueError
with pytest.raises(ValueError):
histogram(x, bins='bad_argument')
|
688c8a8e6489dad89f7c4c1af0f147ab445c4d66cd8aa6d972ae0eea10202da8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.random import randn
from numpy.testing import assert_equal, assert_allclose
try:
from scipy import stats # used in testing
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from ..sigma_clipping import sigma_clip, SigmaClip, sigma_clipped_stats
from ...utils.misc import NumpyRNGContext
def test_sigma_clip():
# need to seed the numpy RNG to make sure we don't get some
# amazingly flukey random number that breaks one of the tests
with NumpyRNGContext(12345):
# Amazing, I've got the same combination on my luggage!
randvar = randn(10000)
filtered_data = sigma_clip(randvar, sigma=1, maxiters=2)
assert sum(filtered_data.mask) > 0
assert sum(~filtered_data.mask) < randvar.size
# this is actually a silly thing to do, because it uses the
# standard deviation as the variance, but it tests to make sure
# these arguments are actually doing something
filtered_data2 = sigma_clip(randvar, sigma=1, maxiters=2,
stdfunc=np.var)
assert not np.all(filtered_data.mask == filtered_data2.mask)
filtered_data3 = sigma_clip(randvar, sigma=1, maxiters=2,
cenfunc=np.mean)
assert not np.all(filtered_data.mask == filtered_data3.mask)
# make sure the maxiters=None method works at all.
filtered_data = sigma_clip(randvar, sigma=3, maxiters=None)
# test copying
assert filtered_data.data[0] == randvar[0]
filtered_data.data[0] += 1.
assert filtered_data.data[0] != randvar[0]
filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
copy=False)
assert filtered_data.data[0] == randvar[0]
filtered_data.data[0] += 1.
assert filtered_data.data[0] == randvar[0]
# test axis
data = np.arange(5) + np.random.normal(0., 0.05, (5, 5)) + \
np.diag(np.ones(5))
filtered_data = sigma_clip(data, axis=0, sigma=2.3)
assert filtered_data.count() == 20
filtered_data = sigma_clip(data, axis=1, sigma=2.3)
assert filtered_data.count() == 25
@pytest.mark.skipif('not HAS_SCIPY')
def test_compare_to_scipy_sigmaclip():
# need to seed the numpy RNG to make sure we don't get some
# amazingly flukey random number that breaks one of the tests
with NumpyRNGContext(12345):
randvar = randn(10000)
astropyres = sigma_clip(randvar, sigma=3, maxiters=None,
cenfunc=np.mean)
scipyres = stats.sigmaclip(randvar, 3, 3)[0]
assert astropyres.count() == len(scipyres)
assert_equal(astropyres[~astropyres.mask].data, scipyres)
def test_sigma_clip_scalar_mask():
"""Test that the returned mask is not a scalar."""
data = np.arange(5)
result = sigma_clip(data, sigma=100., maxiters=1)
assert result.mask.shape != ()
def test_sigma_clip_class():
with NumpyRNGContext(12345):
data = randn(100)
data[10] = 1.e5
sobj = SigmaClip(sigma=1, maxiters=2)
sfunc = sigma_clip(data, sigma=1, maxiters=2)
assert_equal(sobj(data), sfunc)
def test_sigma_clip_mean():
with NumpyRNGContext(12345):
data = np.random.normal(0., 0.05, (10, 10))
data[2, 2] = 1.e5
sobj1 = SigmaClip(sigma=1, maxiters=2, cenfunc='mean')
sobj2 = SigmaClip(sigma=1, maxiters=2, cenfunc=np.nanmean)
assert_equal(sobj1(data), sobj2(data))
assert_equal(sobj1(data, axis=0), sobj2(data, axis=0))
def test_sigma_clip_invalid_cenfunc_stdfunc():
with pytest.raises(ValueError):
SigmaClip(cenfunc='invalid')
with pytest.raises(ValueError):
SigmaClip(stdfunc='invalid')
def test_sigma_clipped_stats():
"""Test list data with input mask or mask_value (#3268)."""
# test list data with mask
data = [0, 1]
mask = np.array([True, False])
result = sigma_clipped_stats(data, mask=mask)
# Check that the result of np.ma.median was converted to a scalar
assert isinstance(result[1], float)
assert result == (1., 1., 0.)
result2 = sigma_clipped_stats(data, mask=mask, axis=0)
assert_equal(result, result2)
# test list data with mask_value
result = sigma_clipped_stats(data, mask_value=0.)
assert isinstance(result[1], float)
assert result == (1., 1., 0.)
# test without mask
data = [0, 2]
result = sigma_clipped_stats(data)
assert isinstance(result[1], float)
assert result == (1., 1., 1.)
_data = np.arange(10)
data = np.ma.MaskedArray([_data, _data, 10 * _data])
mean = sigma_clip(data, axis=0, sigma=1).mean(axis=0)
assert_equal(mean, _data)
mean, median, stddev = sigma_clipped_stats(data, axis=0, sigma=1)
assert_equal(mean, _data)
assert_equal(median, _data)
assert_equal(stddev, np.zeros_like(_data))
def test_sigma_clipped_stats_ddof():
with NumpyRNGContext(12345):
data = randn(10000)
data[10] = 1.e5
mean1, median1, stddev1 = sigma_clipped_stats(data)
mean2, median2, stddev2 = sigma_clipped_stats(data, std_ddof=1)
assert mean1 == mean2
assert median1 == median2
assert_allclose(stddev1, 0.98156805711673156)
assert_allclose(stddev2, 0.98161731654802831)
def test_invalid_sigma_clip():
"""Test sigma_clip of data containing invalid values."""
data = np.ones((5, 5))
data[2, 2] = 1000
data[3, 4] = np.nan
data[1, 1] = np.inf
result = sigma_clip(data)
    # Prior to #4051, sigma_clip returned a mask containing only `False` if
    # the data contained any NaNs or infs, or raised a TypeError if the
    # data also contained a masked value.
assert result.mask[2, 2]
assert result.mask[3, 4]
assert result.mask[1, 1]
result2 = sigma_clip(data, axis=0)
assert result2.mask[1, 1]
assert result2.mask[3, 4]
result3 = sigma_clip(data, axis=0, copy=False)
assert result3.mask[1, 1]
assert result3.mask[3, 4]
# stats along axis with all nans
data[0, :] = np.nan # row of all nans
result4, minarr, maxarr = sigma_clip(data, axis=1, masked=False,
return_bounds=True)
assert np.isnan(minarr[0])
assert np.isnan(maxarr[0])
def test_sigmaclip_negative_axis():
"""Test that dimensions are expanded correctly even if axis is negative."""
data = np.ones((3, 4))
# without correct expand_dims this would raise a ValueError
sigma_clip(data, axis=-1)
def test_sigmaclip_fully_masked():
"""Make sure a fully masked array is returned when sigma clipping a fully
masked array.
"""
data = np.ma.MaskedArray(data=[[1., 0.], [0., 1.]],
mask=[[True, True], [True, True]])
clipped_data = sigma_clip(data)
    assert np.ma.allequal(data, clipped_data)
def test_sigmaclip_empty_masked():
"""Make sure a empty masked array is returned when sigma clipping an empty
masked array.
"""
data = np.ma.MaskedArray(data=[], mask=[])
clipped_data = sigma_clip(data)
    assert np.ma.allequal(data, clipped_data)
def test_sigmaclip_empty():
"""Make sure a empty array is returned when sigma clipping an empty array.
"""
data = np.array([])
clipped_data = sigma_clip(data)
assert_equal(data, clipped_data)
def test_sigma_clip_axis_tuple_3D():
"""Test sigma clipping over a subset of axes (issue #7227).
"""
data = np.sin(0.78 * np.arange(27)).reshape(3, 3, 3)
    mask = np.zeros_like(data, dtype=bool)
data_t = np.rollaxis(data, 1, 0)
mask_t = np.rollaxis(mask, 1, 0)
# Loop over what was originally axis 1 and clip each plane directly:
for data_plane, mask_plane in zip(data_t, mask_t):
mean = data_plane.mean()
maxdev = 1.5 * data_plane.std()
mask_plane[:] = np.logical_or(data_plane < mean - maxdev,
data_plane > mean + maxdev)
# Do the equivalent thing using sigma_clip:
result = sigma_clip(data, sigma=1.5, cenfunc=np.mean, maxiters=1,
axis=(0, -1))
assert_equal(result.mask, mask)
def test_sigmaclip_repr():
sigclip = SigmaClip()
sigclip_repr = ('SigmaClip(sigma=3.0, sigma_lower=3.0, sigma_upper=3.0,'
' maxiters=5, cenfunc=')
sigclip_str = ('<SigmaClip>\n sigma: 3.0\n sigma_lower: 3.0\n'
' sigma_upper: 3.0\n maxiters: 5\n cenfunc: ')
assert repr(sigclip).startswith(sigclip_repr)
assert str(sigclip).startswith(sigclip_str)
|
923cfc507f050310dc805031a0ba8b4276cd8db199435c51d739a4172e958479 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.random import randn, normal
from numpy.testing import assert_allclose, assert_array_almost_equal_nulp
from ..biweight import (biweight_location, biweight_scale,
biweight_midvariance, biweight_midcovariance,
biweight_midcorrelation)
from ...tests.helper import catch_warnings
from ...utils.misc import NumpyRNGContext
def test_biweight_location():
with NumpyRNGContext(12345):
# test that it runs
randvar = randn(10000)
cbl = biweight_location(randvar)
assert abs(cbl - 0) < 1e-2
def test_biweight_location_small():
cbl = biweight_location([1, 3, 5, 500, 2])
assert abs(cbl - 2.745) < 1e-3
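# Minimal reference sketch of the biweight location (standard Tukey-biweight
# form; assumes the default tuning constant c=6 and M=median, an illustration
# rather than the library implementation):
#     u_i = (x_i - M) / (c * MAD)
#     ZBL = M + sum_{|u|<1} (x_i - M)(1 - u_i^2)^2 / sum_{|u|<1} (1 - u_i^2)^2
# For [1, 3, 5, 500, 2]: M=3, MAD=2, the 500 outlier has |u| > 1 and drops
# out, giving ~2.7456, consistent with the 2.745 checked above.
def _ref_biweight_location(x, c=6.0):
    x = np.asarray(x, dtype=float)
    M = np.median(x)
    mad = np.median(np.abs(x - M))
    u = (x - M) / (c * mad)
    w = (np.abs(u) < 1) * (1 - u ** 2) ** 2  # biweight; zero outside |u| < 1
    return M + np.sum(w * (x - M)) / np.sum(w)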
def test_biweight_location_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = normal(5, 2, (ny, nx))
bw = biweight_location(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_location(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_location(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_location_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = normal(5, 2, (nz, ny, nx))
bw = biweight_location(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_scale():
# NOTE: biweight_scale is covered by biweight_midvariance tests
data = [1, 3, 5, 500, 2]
scl = biweight_scale(data)
var = biweight_midvariance(data)
assert_allclose(scl, np.sqrt(var))
def test_biweight_midvariance():
with NumpyRNGContext(12345):
# test that it runs
randvar = randn(10000)
var = biweight_midvariance(randvar)
assert_allclose(var, 1.0, rtol=0.02)
def test_biweight_midvariance_small():
data = [1, 3, 5, 500, 2]
var = biweight_midvariance(data)
assert_allclose(var, 2.9238456) # verified with R
var = biweight_midvariance(data, modify_sample_size=True)
assert_allclose(var, 2.3390765)
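# Minimal reference sketch of the biweight midvariance (standard form;
# assumes the defaults c=9, M=median, and n equal to the full sample size
# when modify_sample_size=False; illustration only):
#     u_i = (x_i - M) / (c * MAD)
#     zeta^2 = n * sum_{|u|<1} (x_i - M)^2 (1 - u_i^2)^4
#                / [sum_{|u|<1} (1 - u_i^2)(1 - 5 u_i^2)]^2
# For [1, 3, 5, 500, 2] this reproduces the ~2.924 value checked above.
def _ref_biweight_midvariance(x, c=9.0):
    x = np.asarray(x, dtype=float)
    M = np.median(x)
    mad = np.median(np.abs(x - M))
    u = (x - M) / (c * mad)
    good = np.abs(u) < 1
    num = np.sum(((x - M) ** 2 * (1 - u ** 2) ** 4)[good])
    den = np.sum(((1 - u ** 2) * (1 - 5 * u ** 2))[good])
    return x.size * num / den ** 2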
def test_biweight_midvariance_5127():
# test a regression introduced in #5127
rand = np.random.RandomState(12345)
data = rand.normal(loc=0., scale=20., size=(100, 100))
var = biweight_midvariance(data)
assert_allclose(var, 406.86938710817344) # verified with R
def test_biweight_midvariance_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = normal(5, 2, (ny, nx))
bw = biweight_midvariance(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_midvariance(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_midvariance(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_midvariance_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = normal(5, 2, (nz, ny, nx))
bw = biweight_midvariance(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_midcovariance_1d():
d = [0, 1, 2]
cov = biweight_midcovariance(d)
var = biweight_midvariance(d)
assert_allclose(cov, [[var]])
def test_biweight_midcovariance_2d():
d = [[0, 1, 2], [2, 1, 0]]
cov = biweight_midcovariance(d)
val = 0.70121809
assert_allclose(cov, [[val, -val], [-val, val]]) # verified with R
d = [[5, 1, 10], [500, 5, 2]]
cov = biweight_midcovariance(d)
assert_allclose(cov, [[14.54159077, -7.79026256], # verified with R
[-7.79026256, 6.92087252]])
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_allclose(cov, [[14.54159077, -5.19350838],
[-5.19350838, 4.61391501]])
def test_biweight_midcovariance_midvariance():
"""
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance.
"""
rng = np.random.RandomState(1)
d = rng.normal(0, 2, size=(100, 3))
cov = biweight_midcovariance(d)
var = [biweight_midvariance(a) for a in d]
assert_allclose(cov.diagonal(), var)
cov2 = biweight_midcovariance(d, modify_sample_size=True)
var2 = [biweight_midvariance(a, modify_sample_size=True)
for a in d]
assert_allclose(cov2.diagonal(), var2)
def test_midcovariance_shape():
"""
Test that biweight_midcovariance raises error with a 3D array.
"""
d = np.ones(27).reshape(3, 3, 3)
with pytest.raises(ValueError) as e:
biweight_midcovariance(d)
assert 'The input array must be 2D or 1D.' in str(e.value)
def test_midcovariance_M_shape():
"""
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array.
"""
d = [0, 1, 2]
M = [[0, 1], [2, 3]]
with pytest.raises(ValueError) as e:
biweight_midcovariance(d, M=M)
assert 'M must be a scalar or 1D array.' in str(e.value)
def test_biweight_midcovariance_symmetric():
"""
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972).
"""
rng = np.random.RandomState(1)
d = rng.gamma(2, 2, size=(3, 500))
cov = biweight_midcovariance(d)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
def test_biweight_midcorrelation():
x = [0, 1, 2]
y = [2, 1, 0]
assert_allclose(biweight_midcorrelation(x, x), 1.0)
assert_allclose(biweight_midcorrelation(x, y), -1.0)
x = [5, 1, 10, 12.4, 13.2]
y = [500, 5, 2, 7.1, 0.9]
# verified with R
assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313)
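# The midcorrelation values above follow the standard normalization of the
# midcovariance by the two midvariances (a sketch; assumes the (nvars, nobs)
# input layout used in the midcovariance tests above):
def _ref_biweight_midcorrelation(x, y):
    cov = biweight_midcovariance(np.vstack([x, y]))
    return cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])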
def test_biweight_midcorrelation_inputs():
a1 = np.ones((3, 3))
a2 = np.ones(5)
a3 = np.ones(7)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a1, a2)
assert 'x must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a1)
assert 'y must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a3)
assert 'x and y must have the same shape.' in str(e.value)
def test_biweight_32bit_runtime_warnings():
"""Regression test for #6905."""
with NumpyRNGContext(12345):
data = np.random.random(100).astype(np.float32)
data[50] = 30000.
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_scale(data)
assert len(warning_lines) == 0
with catch_warnings(RuntimeWarning) as warning_lines:
biweight_midvariance(data)
assert len(warning_lines) == 0
|
bca1bdda9a06543ddbf63083771c0a7a210c361b4cd4ca77d360c1d4abe5a59b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .... import units
from ....tests.helper import assert_quantity_allclose
from .. import BoxLeastSquares
from ...lombscargle.core import has_units
@pytest.fixture
def data():
rand = np.random.RandomState(123)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
dy = rand.uniform(0.005, 0.01, len(t))
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
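    # Phase-fold: |((t - t0 + P/2) mod P) - P/2| is the time offset to the
    # nearest transit center, so the mask selects points within half a
    # duration of a transit.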
    m = (np.abs((t - transit_time + 0.5*period) % period - 0.5*period)
         < 0.5*duration)
y[m] = 1.0 - depth
y += dy * rand.randn(len(t))
return t, y, dy, dict(period=period, transit_time=transit_time,
duration=duration, depth=depth)
def test_32bit_bug():
rand = np.random.RandomState(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
    y[np.abs((t + 1.0) % 2.0 - 1.0) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.randn(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert np.allclose(results.period[np.argmax(results.power)],
2.005441310651872)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert np.allclose(
results.power,
np.array([0.01479464, 0.03804835, 0.09640946, 0.05199547, 0.01970484])
)
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 0.1,
np.log(params["period"]) + 0.1, 1000))
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind]-params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 1,
np.log(params["period"]) + 1, 10))
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
results.assert_allclose(model.power(periods, durations,
method="slow", objective=objective))
def test_input_units(data):
t, y, dy, params = data
t_unit = units.day
y_unit = units.mag
with pytest.raises(units.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * units.one)
with pytest.raises(units.UnitConversionError):
BoxLeastSquares(t * t_unit, y * units.one, dy * y_unit)
with pytest.raises(units.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t*t_unit, y * units.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y*y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = units.day
y_unit = units.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * units.hour)
assert p.unit == t_unit
with pytest.raises(units.UnitConversionError):
model.autoperiod(params["duration"] * units.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(units.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5*units.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(units.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5*units.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5,
maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5,
minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, units.day])
@pytest.mark.parametrize("y_unit", [None, units.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"]-1.0, params["period"]+1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(periods, params["duration"], method=method,
objective=objective)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == units.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == units.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == units.one
assert results.power.unit == units.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
results1.assert_allclose(results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
    dt = np.abs((t - params["transit_time"] + 0.5*p) % p - 0.5*p)
m_in = dt < 0.5*params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
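    # Weighted least squares via the normal equations,
    # w = (A^T C^-1 A)^-1 A^T C^-1 y with C = diag(dy**2):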
w = np.linalg.solve(np.dot(A.T, A / dy[:, None]**2),
np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * units.day
y = y * units.mag
dy = dy * units.mag
model_true = model_true * units.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"],
params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"]-1, params["period"]+1,
period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = units.mag
t = t * units.day
y = y * units.mag
dy = dy * units.mag
params["period"] = params["period"] * units.day
params["duration"] = params["duration"] * units.day
params["transit_time"] = params["transit_time"] * units.day
params["depth"] = params["depth"] * units.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"],
oversample=1000)
stats = model.compute_stats(params["period"], params["duration"],
params["transit_time"])
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], np.array([9, 7, 7, 7, 8]))
assert_quantity_allclose(np.sum(stats["per_transit_log_likelihood"]),
results["log_likelihood"])
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(0.5*params["period"], params["duration"],
oversample=1000)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(stats["harmonic_amplitude"],
0.029945029964964204 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit)
return
assert_quantity_allclose(stats["harmonic_amplitude"],
0.033027988742275853 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-12407.505922833765)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip((1.0, 1.0, 1.0, 0.0),
("depth", "depth_even", "depth_odd", "depth_phased")):
assert np.abs((stats[k][0]-f*params["depth"]) / stats[k][1]) < 1.0
|
8aa4415739c4d5cb06ecbe38b0e121f4a4ebfb8efa8c3e6c4edebef17633af3f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ...io import fits
from ...tests.helper import (assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from ...utils.data import get_pkg_data_filename
from ... import table
from ... import units as u
from .conftest import MaskedTable
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
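# Note: the properties above build each column lazily on first access and
# cache it on the test instance (pytest instantiates the class once per test
# method), so every test gets independent column objects.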
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.columns.keys() == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.columns.keys() == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.columns.keys() == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.columns.keys() == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.columns.keys() == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.columns.keys() == ['c']
# Check that we did not change the name of the input column
assert t.columns.keys() == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.columns.keys() == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
        # Make sure it is not repeating the last row, but instead
        # adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        # note that '10' is truncated to '1' by the one-character column dtype
        assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(['one', 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
        Light testing of the Table.insert_row() method. The deep testing is
        done via the add_row() tests, which call insert_row(index=len(self),
        ...), so here we just test that the extra index parameter is handled
        correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.columns.keys() == []
assert self.t.as_array() is None
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.columns.keys() == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([(str('a'), 'int'),
(str('b'), 'int')])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.columns.keys() == []
assert self.t.as_array() is None
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.columns.keys() == []
assert t.as_array() is None
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
def test_empty(self, table_types):
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1')
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array(
[str(x) for x in ["John", "Jo", "Max"]])])
assert np.all([t['name'] == np.array(
[str(x) for x in ["Jackson", "Miller", "Miller"]])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
d = np.array([(2, 1),
(3, 6),
(4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
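        # bool index below: sys.byteorder == 'little' -> True -> 1 -> '<'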
native_order = byte_orders[sys.byteorder == 'little']
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder ==
arr2[colname].dtype.byteorder)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
t['x'].mask == [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_equality():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from ...utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
    # Python 2 guard: plain '' was bytes there; this never triggers on Python 3
if isinstance('', bytes):
return
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
def test_unicode_bytestring_conversion(table_types):
t = table_types.Table([['abc'], ['def'], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
assert t1['col0'][0] == 'abc'
assert t1['col1'][0] == 'def'
assert t1['col2'][0] == 1
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
    assert t1['col0'][0] == 'abc'
    assert t1['col1'][0] == 'def'
assert t1['col2'][0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert exc.value.args[0] == "Cannot convert a table with multi-dimensional columns to a pandas DataFrame"
def test_mixin(self):
from ...coordinates import SkyCoord
t = table.Table()
t['c'] = SkyCoord([1, 2, 3], [4, 5, 6], unit='deg')
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert exc.value.args[0] == "Cannot convert a table with mixin columns to a pandas DataFrame"
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
assert t2[name].dtype.kind == 'f'
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError):
t.replace_column('not there', [1, 2, 3])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
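        """Minimal table-like object implementing the ``__astropy_table__`` protocol."""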
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.MaskedColumn)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
                assert st.columns[0][0] == (1 if cpy else 10)
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
t = table.Table(st, dtype=dtypes, names=names, meta=OrderedDict([('c', 3)]))
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
# The supplied meta is ignored. This is consistent with current
# behavior when initializing from an existing astropy Table.
assert t.meta == st.meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err)
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
    Test table update like ``t['a'] = value``. This builds on the
    already well-tested ``replace_column`` and in-place update
    ``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
assert len(w) == 0
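            # Capture the current frame's line number so we can check that the
            # warning points at the assignment on the following line.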
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
    assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
    t1 = table.QTable([["A"]], names=["B"])
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
|
15f6940f3e3449f37fec4e5f5694806aa5a831ec47c2c84b1eef97748aed61de | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from io import StringIO
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import pytest
from ... import units as u
from ... import time
from ... import coordinates
from ... import table
from ..info import serialize_method_as
from ...utils.data_info import data_info_factory, dtype_info_name
from ..table_helpers import simple_table
def test_table_info_attributes(table_types):
"""
Test the info() method of printing a summary of table column attributes
"""
a = np.array([1, 2, 3], dtype='int32')
b = np.array([1, 2, 3], dtype='float32')
c = np.array(['a', 'c', 'e'], dtype='|S1')
t = table_types.Table([a, b, c], names=['a', 'b', 'c'])
# Minimal output for a typical table
tinfo = t.info(out=None)
subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else []
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format',
'description', 'class', 'n_bad', 'length']
assert np.all(tinfo['name'] == ['a', 'b', 'c'])
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')])
if subcls:
assert np.all(tinfo['class'] == ['MyColumn'] * 3)
# All output fields including a mixin column
t['d'] = [1, 2, 3] * u.m
t['d'].description = 'quantity'
t['a'].format = '%02d'
t['e'] = time.Time([1, 2, 3], format='mjd')
t['e'].info.description = 'time'
t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg')
t['f'].info.description = 'skycoord'
tinfo = t.info(out=None)
assert np.all(tinfo['name'] == 'a b c d e f'.split())
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64',
'object', 'object'])
assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg'])
assert np.all(tinfo['format'] == ['%02d', '', '', '', '', ''])
assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord'])
cls = t.ColumnClass.__name__
assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord'])
# Test that repr(t.info) is same as t.info()
out = StringIO()
t.info(out=out)
assert repr(t.info) == out.getvalue()
def test_table_info_stats(table_types):
"""
Test the info() method of printing a summary of table column statistics
"""
a = np.array([1, 2, 1, 2], dtype='int32')
b = np.array([1, 2, 1, 2], dtype='float32')
c = np.array(['a', 'c', 'e', 'f'], dtype='|S1')
d = time.Time([1, 2, 1, 2], format='mjd')
t = table_types.Table([a, b, c, d], names=['a', 'b', 'c', 'd'])
# option = 'stats'
masked = 'masked=True ' if t.masked else ''
out = StringIO()
t.info('stats', out=out)
table_header_line = '<{0} {1}length=4>'.format(t.__class__.__name__, masked)
exp = [table_header_line,
'name mean std min max',
'---- ---- --- --- ---',
' a 1.5 0.5 1 2',
' b 1.5 0.5 1.0 2.0',
' c -- -- -- --',
' d -- -- 1.0 2.0']
assert out.getvalue().splitlines() == exp
# option = ['attributes', 'stats']
tinfo = t.info(['attributes', 'stats'], out=None)
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',
'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length']
assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--'])
assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--'])
assert np.all(tinfo['min'] == ['1', '1.0', '--', '1.0'])
assert np.all(tinfo['max'] == ['2', '2.0', '--', '2.0'])
out = StringIO()
t.info('stats', out=out)
exp = [table_header_line,
'name mean std min max',
'---- ---- --- --- ---',
' a 1.5 0.5 1 2',
' b 1.5 0.5 1.0 2.0',
' c -- -- -- --',
' d -- -- 1.0 2.0']
assert out.getvalue().splitlines() == exp
# option = ['attributes', custom]
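    # data_info_factory builds a custom info option (a callable) from a list
    # of stat names and the functions that compute them.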
custom = data_info_factory(names=['sum', 'first'],
funcs=[np.sum, lambda col: col[0]])
out = StringIO()
tinfo = t.info(['attributes', custom], out=None)
assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description',
'class', 'sum', 'first', 'n_bad', 'length']
assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd'])
assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object'])
assert np.all(tinfo['sum'] == ['6', '6.0', '--', '--'])
assert np.all(tinfo['first'] == ['1', '1.0', 'a', '1.0'])
def test_data_info():
"""
Test getting info for just a column.
"""
cols = [table.Column([1.0, 2.0, np.nan], name='name',
description='description', unit='m/s'),
table.MaskedColumn([1.0, 2.0, 3.0], name='name',
description='description', unit='m/s',
mask=[False, False, True])]
for c in cols:
# Test getting the full ordered dict
cinfo = c.info(out=None)
assert cinfo == OrderedDict([('name', 'name'),
('dtype', 'float64'),
('shape', ''),
('unit', 'm / s'),
('format', ''),
('description', 'description'),
('class', type(c).__name__),
('n_bad', 1),
('length', 3)])
# Test the console (string) version which omits trivial values
out = StringIO()
c.info(out=out)
exp = ['name = name',
'dtype = float64',
'unit = m / s',
'description = description',
'class = {0}'.format(type(c).__name__),
'n_bad = 1',
'length = 3']
assert out.getvalue().splitlines() == exp
# repr(c.info) gives the same as c.info()
assert repr(c.info) == out.getvalue()
# Test stats info
cinfo = c.info('stats', out=None)
assert cinfo == OrderedDict([('name', 'name'),
('mean', '1.5'),
('std', '0.5'),
('min', '1.0'),
('max', '2.0'),
('n_bad', 1),
('length', 3)])
def test_data_info_subclass():
class Column(table.Column):
"""
Confusingly named Column on purpose, but that is legal.
"""
pass
for data in ([], [1, 2]):
c = Column(data, dtype='int64')
cinfo = c.info(out=None)
assert cinfo == OrderedDict([('dtype', 'int64'),
('shape', ''),
('unit', ''),
('format', ''),
('description', ''),
('class', 'Column'),
('n_bad', 0),
('length', len(data))])
def test_scalar_info():
"""
Make sure info works with scalar values
"""
c = time.Time('2000:001')
cinfo = c.info(out=None)
assert cinfo['n_bad'] == 0
assert 'length' not in cinfo
def test_empty_table():
t = table.Table()
out = StringIO()
t.info(out=out)
exp = ['<Table length=0>', '<No columns>']
assert out.getvalue().splitlines() == exp
def test_class_attribute():
"""
Test that class info column is suppressed only for identical non-mixin
columns.
"""
vals = [[1] * u.m, [2] * u.m]
texp = ['<Table length=1>',
'name dtype unit',
'---- ------- ----',
'col0 float64 m',
'col1 float64 m']
qexp = ['<QTable length=1>',
'name dtype unit class ',
'---- ------- ---- --------',
'col0 float64 m Quantity',
'col1 float64 m Quantity']
for table_cls, exp in ((table.Table, texp),
(table.QTable, qexp)):
t = table_cls(vals)
out = StringIO()
t.info(out=out)
assert out.getvalue().splitlines() == exp
def test_ignore_warnings():
t = table.Table([[np.nan, np.nan]])
with warnings.catch_warnings(record=True) as warns:
t.info('stats', out=None)
assert len(warns) == 0
def test_no_deprecation_warning():
# regression test for #5459, where numpy deprecation warnings were
# emitted unnecessarily.
t = simple_table()
with warnings.catch_warnings(record=True) as warns:
t.info()
assert len(warns) == 0
def test_lost_parent_error():
c = table.Column([1, 2, 3], name='a')
with pytest.raises(AttributeError) as err:
c[:].info.name
assert 'failed access "info" attribute' in str(err)
def test_info_serialize_method():
"""
Unit test of context manager to set info.serialize_method. Normally just
used to set this for writing a Table to file (FITS, ECSV, HDF5).
"""
t = table.Table({'tm': time.Time([1, 2], format='cxcsec'),
'sc': coordinates.SkyCoord([1, 2], [1, 2], unit='deg'),
'mc': table.MaskedColumn([1, 2], mask=[True, False]),
'mc2': table.MaskedColumn([1, 2], mask=[True, False])}
)
origs = {}
for name in ('tm', 'mc', 'mc2'):
origs[name] = deepcopy(t[name].info.serialize_method)
# Test setting by name and getting back to originals
with serialize_method_as(t, {'tm': 'test_tm', 'mc': 'test_mc'}):
for name in ('tm', 'mc'):
assert all(t[name].info.serialize_method[key] == 'test_' + name
for key in t[name].info.serialize_method)
assert t['mc2'].info.serialize_method == origs['mc2']
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
# Test setting by name and class, where name takes precedence. Also
# test that it works for subclasses.
with serialize_method_as(t, {'tm': 'test_tm', 'mc': 'test_mc',
table.Column: 'test_mc2'}):
for name in ('tm', 'mc', 'mc2'):
assert all(t[name].info.serialize_method[key] == 'test_' + name
for key in t[name].info.serialize_method)
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
    # Test supplying a single string that applies to all columns with
# a serialize_method.
with serialize_method_as(t, 'test'):
for name in ('tm', 'mc', 'mc2'):
assert all(t[name].info.serialize_method[key] == 'test'
for key in t[name].info.serialize_method)
assert not hasattr(t['sc'].info, 'serialize_method')
for name in ('tm', 'mc', 'mc2'):
assert t[name].info.serialize_method == origs[name] # dict compare
assert not hasattr(t['sc'].info, 'serialize_method')
def test_info_serialize_method_exception():
"""
    Check that info.serialize_method is restored to its original value
    when an exception occurs inside the serialize_method_as context manager.
"""
t = simple_table(masked=True)
origs = deepcopy(t['a'].info.serialize_method)
try:
with serialize_method_as(t, 'test'):
assert all(t['a'].info.serialize_method[key] == 'test'
for key in t['a'].info.serialize_method)
raise ZeroDivisionError()
except ZeroDivisionError:
pass
assert t['a'].info.serialize_method == origs # dict compare
|
4dcc847a5c778b756c68bb1402d8e4a58e800f1156865d73d6d5b1f6384b7889 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from .test_table import SetupData
from ..bst import BST, FastRBT, FastBST
from ..sorted_array import SortedArray
from ..soco import SCEngine, HAS_SOCO
from ..table import QTable, Row, Table
from ... import units as u
from ...time import Time
from ..column import BaseColumn
try:
import bintrees
except ImportError:
HAS_BINTREES = False
else:
HAS_BINTREES = True
if HAS_BINTREES:
available_engines = [BST, FastBST, FastRBT, SortedArray]
else:
available_engines = [BST, SortedArray]
if HAS_SOCO:
available_engines.append(SCEngine)
@pytest.fixture(params=available_engines)
def engine(request):
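    """Parametrized fixture running each test with every available index engine."""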
return request.param
_col = [1, 2, 3, 4, 5]
@pytest.fixture(params=[
_col,
u.Quantity(_col),
Time(_col, format='jyear'),
])
def main_col(request):
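    """Parametrized fixture giving the main column as a list, Quantity, or Time."""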
return request.param
def assert_col_equal(col, array):
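    """Compare a column with expected values, handling Time columns specially."""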
if isinstance(col, Time):
assert np.all(col == Time(array, format='jyear'))
else:
assert np.all(col == col.__class__(array))
@pytest.mark.usefixtures('table_types')
class TestIndex(SetupData):
def _setup(self, main_col, table_types):
super()._setup(table_types)
self.main_col = main_col
if isinstance(main_col, u.Quantity):
self._table_type = QTable
if not isinstance(main_col, list):
self._column_type = lambda x: x # don't change mixin type
self.mutable = isinstance(main_col, (list, u.Quantity))
def make_col(self, name, lst):
return self._column_type(lst, name=name)
def make_val(self, val):
if isinstance(self.main_col, Time):
return Time(val, format='jyear')
return val
@property
def t(self):
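        """Lazily construct and cache the test table with columns 'a', 'b', 'c'."""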
if not hasattr(self, '_t'):
self._t = self._table_type()
self._t['a'] = self._column_type(self.main_col)
self._t['b'] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1])
self._t['c'] = self._column_type(['7', '8', '9', '10', '11'])
return self._t
@pytest.mark.parametrize("composite", [False, True])
def test_table_index(self, main_col, table_types, composite, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index(('a', 'b') if composite else 'a', engine=engine)
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
if not self.mutable:
return
# test altering table columns
t['a'][0] = 4
t.add_row((6, 6.0, '7'))
t['a'][3] = 10
t.remove_row(2)
t.add_row((4, 5.0, '9'))
assert_col_equal(t['a'], np.array([4, 2, 10, 5, 6, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0]))
assert np.all(t['c'].data == np.array(['7', '8', '10', '11', '7', '9']))
index = t.indices[0]
        lst = list(index.data.items())
        if composite:
            assert np.all(lst == [((2, 5.1), [1]),
                                  ((4, 4.0), [0]),
                                  ((4, 5.0), [5]),
                                  ((5, 1.1), [3]),
                                  ((6, 6.0), [4]),
                                  ((10, 7.0), [2])])
        else:
            assert np.all(lst == [((2,), [1]),
                                  ((4,), [0, 5]),
                                  ((5,), [3]),
                                  ((6,), [4]),
                                  ((10,), [2])])
t.remove_indices('a')
assert len(t.indices) == 0
def test_table_slicing(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
for slice_ in ([0, 2], np.array([0, 2])):
t2 = t[slice_]
# t2 should retain an index on column 'a'
assert len(t2.indices) == 1
assert_col_equal(t2['a'], [1, 3])
# the index in t2 should reorder row numbers after slicing
assert np.all(t2.indices[0].sorted_data() == [0, 1])
# however, this index should be a deep copy of t1's index
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
def test_remove_rows(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
# remove individual row
t2 = t.copy()
t2.remove_rows(2)
assert_col_equal(t2['a'], [1, 2, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3])
# remove by list, ndarray, or slice
for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)):
t2 = t.copy()
t2.remove_rows(cut)
assert_col_equal(t2['a'], [2, 4])
assert np.all(t2.indices[0].sorted_data() == [0, 1])
with pytest.raises(ValueError):
t.remove_rows((0, 2, 4))
def test_col_get_slice(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
# get slice
t2 = t[1:3] # table slice
assert_col_equal(t2['a'], [2, 3])
assert np.all(t2.indices[0].sorted_data() == [0, 1])
col_slice = t['a'][1:3]
assert_col_equal(col_slice, [2, 3])
# true column slices discard indices
if isinstance(t['a'], BaseColumn):
assert len(col_slice.info.indices) == 0
# take slice of slice
t2 = t[::2]
assert_col_equal(t2['a'], np.array([1, 3, 5]))
t3 = t2[::-1]
assert_col_equal(t3['a'], np.array([5, 3, 1]))
assert np.all(t3.indices[0].sorted_data() == [2, 1, 0])
t3 = t2[:2]
assert_col_equal(t3['a'], np.array([1, 3]))
assert np.all(t3.indices[0].sorted_data() == [0, 1])
# out-of-bound slices
for t_empty in (t2[3:], t2[2:1], t3[2:]):
assert len(t_empty['a']) == 0
assert np.all(t_empty.indices[0].sorted_data() == [])
if self.mutable:
# get boolean mask
mask = t['a'] % 2 == 1
t2 = t[mask]
assert_col_equal(t2['a'], [1, 3, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2])
def test_col_set_slice(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
# set slice
t2 = t.copy()
t2['a'][1:3] = np.array([6, 7])
assert_col_equal(t2['a'], np.array([1, 6, 7, 4, 5]))
assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2])
# change original table via slice reference
t2 = t.copy()
t3 = t2[1:3]
assert_col_equal(t3['a'], np.array([2, 3]))
assert np.all(t3.indices[0].sorted_data() == [0, 1])
t3['a'][0] = 5
assert_col_equal(t3['a'], np.array([5, 3]))
assert_col_equal(t2['a'], np.array([1, 5, 3, 4, 5]))
assert np.all(t3.indices[0].sorted_data() == [1, 0])
assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4])
# set boolean mask
t2 = t.copy()
mask = t['a'] % 2 == 1
t2['a'][mask] = 0.
assert_col_equal(t2['a'], [0, 2, 0, 4, 0])
assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3])
def test_multiple_slices(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
for i in range(6, 51):
t.add_row((i, 1.0, 'A'))
assert_col_equal(t['a'], [i for i in range(1, 51)])
assert np.all(t.indices[0].sorted_data() == [i for i in range(50)])
evens = t[::2]
assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)])
reverse = evens[::-1]
index = reverse.indices[0]
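        # Composing [::2] and [::-1] over 50 rows yields a slice starting at
        # row 48 and stepping back by 2; stop=-2 lets it reach row 0.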
assert (index.start, index.stop, index.step) == (48, -2, -2)
assert np.all(index.sorted_data() == [i for i in range(24, -1, -1)])
# modify slice of slice
reverse[-10:] = 0
expected = np.array([i for i in range(1, 51)])
expected[:20][expected[:20] % 2 == 1] = 0
assert_col_equal(t['a'], expected)
assert_col_equal(evens['a'], expected[::2])
assert_col_equal(reverse['a'], expected[::2][::-1])
# first ten evens are now zero
assert np.all(t.indices[0].sorted_data() ==
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ [i for i in range(20, 50)])
assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)])
assert np.all(reverse.indices[0].sorted_data() ==
[i for i in range(24, -1, -1)])
# try different step sizes of slice
t2 = t[1:20:2]
assert_col_equal(t2['a'], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
assert np.all(t2.indices[0].sorted_data() == [i for i in range(10)])
t3 = t2[::3]
assert_col_equal(t3['a'], [2, 8, 14, 20])
assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3])
t4 = t3[2::-1]
assert_col_equal(t4['a'], [14, 8, 2])
assert np.all(t4.indices[0].sorted_data() == [2, 1, 0])
def test_sort(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t[::-1] # reverse table
assert_col_equal(t['a'], [5, 4, 3, 2, 1])
t.add_index('a', engine=engine)
assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0])
if not self.mutable:
return
# sort table by column a
t2 = t.copy()
t2.sort('a')
assert_col_equal(t2['a'], [1, 2, 3, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
# sort table by primary key
t2 = t.copy()
t2.sort()
assert_col_equal(t2['a'], [1, 2, 3, 4, 5])
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
def test_insert_row(self, main_col, table_types, engine):
self._setup(main_col, table_types)
if not self.mutable:
return
t = self.t
t.add_index('a', engine=engine)
t.insert_row(2, (6, 1.0, '12'))
assert_col_equal(t['a'], [1, 2, 6, 3, 4, 5])
assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2])
t.insert_row(1, (0, 4.0, '13'))
assert_col_equal(t['a'], [1, 0, 2, 6, 3, 4, 5])
assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3])
def test_index_modes(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
# first, no special mode
assert len(t[[1, 3]].indices) == 1
assert len(t[::-1].indices) == 1
assert len(self._table_type(t).indices) == 1
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t2 = t.copy()
# non-copy mode
with t.index_mode('discard_on_copy'):
assert len(t[[1, 3]].indices) == 0
assert len(t[::-1].indices) == 0
assert len(self._table_type(t).indices) == 0
assert len(t2.copy().indices) == 1 # mode should only affect t
# make sure non-copy mode is exited correctly
assert len(t[[1, 3]].indices) == 1
if not self.mutable:
return
# non-modify mode
with t.index_mode('freeze'):
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t['a'][0] = 6
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t.add_row((2, 1.5, '12'))
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t.remove_rows([1, 3])
assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
assert_col_equal(t['a'], [6, 3, 5, 2])
# mode should only affect t
assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
t2['a'][0] = 6
assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0])
# make sure non-modify mode is exited correctly
assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0])
if isinstance(t['a'], BaseColumn):
assert len(t['a'][::-1].info.indices) == 0
with t.index_mode('copy_on_getitem'):
assert len(t['a'][[1, 2]].info.indices) == 1
# mode should only affect t
assert len(t2['a'][[1, 2]].info.indices) == 0
assert len(t['a'][::-1].info.indices) == 0
assert len(t2['a'][::-1].info.indices) == 0
def test_index_retrieval(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t.add_index(['a', 'c'], engine=engine)
assert len(t.indices) == 2
assert len(t.indices['a'].columns) == 1
assert len(t.indices['a', 'c'].columns) == 2
with pytest.raises(IndexError):
t.indices['b']
def test_col_rename(self, main_col, table_types, engine):
'''
Checks for a previous bug in which copying a Table
with different column names raised an exception.
'''
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t2 = self._table_type(self.t, names=['d', 'e', 'f'])
assert len(t2.indices) == 1
def test_table_loc(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t.add_index('b', engine=engine)
t2 = t.loc[self.make_val(3)] # single label, with primary key 'a'
assert_col_equal(t2['a'], [3])
assert isinstance(t2, Row)
# list search
t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]]
assert_col_equal(t2['a'], [1, 4, 2]) # same order as input list
if not isinstance(main_col, Time):
# ndarray search
t2 = t.loc[np.array([1, 4, 2])]
assert_col_equal(t2['a'], [1, 4, 2])
t2 = t.loc[self.make_val(3): self.make_val(5)] # range search
assert_col_equal(t2['a'], [3, 4, 5])
t2 = t.loc['b', 5.0:7.0]
assert_col_equal(t2['b'], [5.1, 6.2, 7.0])
# search by sorted index
t2 = t.iloc[0:2] # two smallest rows by column 'a'
assert_col_equal(t2['a'], [1, 2])
t2 = t.iloc['b', 2:] # exclude two smallest rows in column 'b'
assert_col_equal(t2['b'], [5.1, 6.2, 7.0])
for t2 in (t.loc[:], t.iloc[:]):
assert_col_equal(t2['a'], [1, 2, 3, 4, 5])
def test_table_loc_indices(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine)
t.add_index('b', engine=engine)
t2 = t.loc_indices[self.make_val(3)] # single label, with primary key 'a'
assert t2 == 2
# list search
t2 = t.loc_indices[[self.make_val(1), self.make_val(4), self.make_val(2)]]
        for i, p in zip(t2, [1, 4, 2]):  # same order as input list
            assert i == p - 1
def test_invalid_search(self, main_col, table_types, engine):
# using .loc and .loc_indices with a value not present should raise an exception
self._setup(main_col, table_types)
t = self.t
        t.add_index('a', engine=engine)
with pytest.raises(KeyError):
t.loc[self.make_val(6)]
with pytest.raises(KeyError):
t.loc_indices[self.make_val(6)]
def test_copy_index_references(self, main_col, table_types, engine):
# check against a bug in which indices were given an incorrect
# column reference when copied
self._setup(main_col, table_types)
t = self.t
        t.add_index('a', engine=engine)
        t.add_index('b', engine=engine)
t2 = t.copy()
assert t2.indices['a'].columns[0] is t2['a']
assert t2.indices['b'].columns[0] is t2['b']
def test_unique_index(self, main_col, table_types, engine):
self._setup(main_col, table_types)
t = self.t
t.add_index('a', engine=engine, unique=True)
assert np.all(t.indices['a'].sorted_data() == [0, 1, 2, 3, 4])
if self.mutable:
with pytest.raises(ValueError):
t.add_row((5, 5.0, '9'))
def test_copy_indexed_table(self, table_types):
self._setup(_col, table_types)
t = self.t
t.add_index('a')
t.add_index(['a', 'b'])
for tp in (self._table_type(t), t.copy()):
assert len(t.indices) == len(tp.indices)
for index, indexp in zip(t.indices, tp.indices):
assert np.all(index.data.data == indexp.data.data)
assert index.data.data.colnames == indexp.data.data.colnames
def test_updating_row_byindex(self, main_col, table_types, engine):
self._setup(main_col, table_types)
        t = Table([['a', 'b', 'c', 'd'], [2, 3, 4, 5], [3, 4, 5, 6]],
                  names=('a', 'b', 'c'), meta={'name': 'first table'})
t.add_index('a', engine=engine)
t.add_index('b', engine=engine)
t.loc['c'] = ['g', 40, 50] # single label, with primary key 'a'
t2 = t[2]
assert list(t2) == ['g', 40, 50]
# list search
t.loc[['a', 'd', 'b']] = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
t2 = [['a', 20, 30], ['d', 50, 60], ['b', 30, 40]]
        for i, p in zip(t2, [1, 4, 2]):  # same order as input list
            assert list(t[p - 1]) == i
def test_invalid_updation(self, main_col, table_types, engine):
        # updating via .loc with mismatched row shapes or lengths should raise
        self._setup(main_col, table_types)
        t = Table([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]],
                  names=('a', 'b', 'c'), meta={'name': 'first table'})
        t.add_index('a', engine=engine)
        with pytest.raises(ValueError):
            t.loc[3] = [[1, 2, 3]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]]
with pytest.raises(ValueError):
t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]]
|
49ead27bbf0466f5e112205d95b5625348336510c963b0e74a75572edc268d49 | import os
import re
from ..scripts import showtable
from ...utils.compat import NUMPY_LT_1_14
ROOT = os.path.abspath(os.path.dirname(__file__))
ASCII_ROOT = os.path.join(ROOT, '..', '..', 'io', 'ascii', 'tests')
FITS_ROOT = os.path.join(ROOT, '..', '..', 'io', 'fits', 'tests')
VOTABLE_ROOT = os.path.join(ROOT, '..', '..', 'io', 'votable', 'tests')
def test_missing_file(capsys):
showtable.main(['foobar.fits'])
out, err = capsys.readouterr()
assert err.startswith("ERROR: [Errno 2] No such file or directory: "
"'foobar.fits'")
def test_info(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info'])
out, err = capsys.readouterr()
assert out.splitlines() == ['<Table length=3>',
' name dtype ',
'------ -------',
'target bytes20',
' V_mag float32']
def test_stats(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--stats'])
out, err = capsys.readouterr()
if NUMPY_LT_1_14:
expected = ['<Table length=3>',
' name mean std min max ',
'------ ------- ------- ---- ----',
'target -- -- -- --',
' V_mag 12.8667 1.72111 11.1 15.2']
else:
expected = ['<Table length=3>',
' name mean std min max ',
'------ --------- --------- ---- ----',
'target -- -- -- --',
' V_mag 12.86666[0-9]? 1.7211105 11.1 15.2']
out = out.splitlines()
assert out[:4] == expected[:4]
# Here we use re.match as in some cases one of the values above is
# platform-dependent.
assert re.match(expected[4], out[4]) is not None
def test_fits(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits')])
out, err = capsys.readouterr()
assert out.splitlines() == [' target V_mag',
'------- -----',
'NGC1001 11.1',
'NGC1002 12.3',
'NGC1003 15.2']
def test_fits_hdu(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/zerowidth.fits'),
'--hdu', 'AIPS OF'])
out, err = capsys.readouterr()
if NUMPY_LT_1_14:
assert out.startswith(
' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n'
' DAYS \n'
'-------- --------- ----------- -------- ------- -------- --------\n'
'0.144387 1 10 1 1 4 4\n')
else:
assert out.startswith(
' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n'
' DAYS \n'
'---------- --------- ----------- -------- ------- -------- --------\n'
'0.14438657 1 10 1 1 4 4\n')
def test_csv(capsys):
showtable.main([os.path.join(ASCII_ROOT, 't/simple_csv.csv')])
out, err = capsys.readouterr()
assert out.splitlines() == [' a b c ',
'--- --- ---',
' 1 2 3',
' 4 5 6']
def test_ascii_format(capsys):
showtable.main([os.path.join(ASCII_ROOT, 't/commented_header.dat'),
'--format', 'ascii.commented_header'])
out, err = capsys.readouterr()
assert out.splitlines() == [' a b c ',
'--- --- ---',
' 1 2 3',
' 4 5 6']
def test_ascii_delimiter(capsys):
showtable.main([os.path.join(ASCII_ROOT, 't/simple2.txt'),
'--format', 'ascii', '--delimiter', '|'])
out, err = capsys.readouterr()
assert out.splitlines() == [
"obsid redshift X Y object rad ",
"----- -------- ---- ---- ----------- ----",
" 3102 0.32 4167 4085 Q1250+568-A 9.0",
" 3102 0.32 4706 3916 Q1250+568-B 14.0",
" 877 0.22 4378 3892 'Source 82' 12.5",
]
def test_votable(capsys):
showtable.main([os.path.join(VOTABLE_ROOT, 'data/regression.xml'),
'--table-id', 'main_table', '--max-width', '50'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' string_test string_test_2 ... bitarray2 [16]',
'----------------- ------------- ... --------------',
' String & test Fixed stri ... True .. False',
'String & test 0123456789 ... -- .. --',
' XXXX XXXX ... -- .. --',
' ... -- .. --',
' ... -- .. --',
]
def test_max_lines(capsys):
showtable.main([os.path.join(ASCII_ROOT, 't/cds2.dat'),
'--format', 'ascii.cds', '--max-lines', '7',
'--max-width', '30'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' SST ... Note',
' ... ',
'--------------- ... ----',
'041314.1+281910 ... --',
' ... ... ...',
'044427.1+251216 ... --',
'044642.6+245903 ... --',
'Length = 215 rows',
]
def test_show_dtype(capsys):
showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'),
'--show-dtype'])
out, err = capsys.readouterr()
assert out.splitlines() == [
' target V_mag ',
'bytes20 float32',
'------- -------',
'NGC1001 11.1',
'NGC1002 12.3',
'NGC1003 15.2',
]
def test_hide_unit(capsys):
showtable.main([os.path.join(ASCII_ROOT, 't/cds.dat'),
'--format', 'ascii.cds'])
out, err = capsys.readouterr()
assert out.splitlines() == [
'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',
' h min s deg arcmin arcsec mag ',
'----- --- --- ----- --- --- ------ ------ ----- ----- --- ----',
' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',
]
showtable.main([os.path.join(ASCII_ROOT, 't/cds.dat'),
'--format', 'ascii.cds', '--hide-unit'])
out, err = capsys.readouterr()
assert out.splitlines() == [
'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',
'----- --- --- ----- --- --- --- --- ----- ----- --- ----',
' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',
]
|
c7cc580914f7090b81fe047bf1897230111168effc9bbdc63f4efdf7f844968d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ...tests.helper import catch_warnings
from ...table import Table, Column, QTable, table_helpers, NdarrayMixin, unique
from ...utils.exceptions import AstropyUserWarning
from ... import time
from ... import units as u
from ... import coordinates
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
def test_column_group_by(T1):
for masked in (False, True):
t1 = Table(T1, masked=masked)
t1a = t1['a'].copy()
# Group by a Column (i.e. numpy array)
t1ag = t1a.group_by(t1['a'])
assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))
# Group by a Table
t1ag = t1a.group_by(t1['a', 'b'])
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Group by a numpy structured array
t1ag = t1a.group_by(t1['a', 'b'].as_array())
assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
def test_table_group_by(T1):
"""
Test basic table group_by functionality for possible key types and for
masked/unmasked tables.
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Group by a single column key specified by name
tg = t1.group_by('a')
assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))
assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>"
assert str(tg['a'].groups) == "<ColumnGroups indices=[0 1 4 8]>"
# Sorted by 'a' and in original order for rest
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3']
assert tg.meta['ta'] == 1
assert tg['c'].meta['a'] == 1
assert tg['c'].description == 'column c'
# Group by a table column
tg2 = t1.group_by(t1['a'])
assert tg.pformat() == tg2.pformat()
# Group by two columns spec'd by name
for keys in (['a', 'b'], ('a', 'b')):
tg = t1.group_by(keys)
assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))
# Sorted by 'a', 'b' and in original order for rest
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 1 b 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 c 7.0 0']
# Group by a Table
tg2 = t1.group_by(t1['a', 'b'])
assert tg.pformat() == tg2.pformat()
# Group by a structured array
tg2 = t1.group_by(t1['a', 'b'].as_array())
assert tg.pformat() == tg2.pformat()
# Group by a simple ndarray
tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))
assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))
assert tg.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 c 7.0 0',
' 2 b 6.0 2',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 2 a 4.0 3',
' 1 b 3.0 5',
' 0 a 0.0 4']
def test_groups_keys(T1):
tg = T1.group_by('a')
keys = tg.groups.keys
assert keys.dtype.names == ('a',)
assert np.all(keys['a'] == np.array([0, 1, 2]))
tg = T1.group_by(['a', 'b'])
keys = tg.groups.keys
assert keys.dtype.names == ('a', 'b')
assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2]))
assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c']))
# Grouping by Column ignores column name
tg = T1.group_by(T1['b'])
keys = tg.groups.keys
assert keys.dtype.names is None
def test_groups_iterator(T1):
tg = T1.group_by('a')
for ii, group in enumerate(tg.groups):
assert group.pformat() == tg.groups[ii].pformat()
assert group['a'][0] == tg['a'][tg.groups.indices[ii]]
def test_grouped_copy(T1):
"""
Test that copying a table or column copies the groups properly
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
tg = t1.group_by('a')
tgc = tg.copy()
assert np.all(tgc.groups.indices == tg.groups.indices)
assert np.all(tgc.groups.keys == tg.groups.keys)
tac = tg['a'].copy()
assert np.all(tac.groups.indices == tg['a'].groups.indices)
c1 = t1['a'].copy()
gc1 = c1.group_by(t1['a'])
gc1c = gc1.copy()
assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
def test_grouped_slicing(T1):
"""
Test that slicing a table removes previous grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by('a')
tg2 = tg[3:5]
assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))
assert tg2.groups.keys is None
def test_group_column_from_table(T1):
"""
Group a column that is part of a table
"""
cg = T1['c'].group_by(np.array(T1['a']))
assert np.all(cg.groups.keys == np.array([0, 1, 2]))
assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
def test_table_groups_mask_index(T1):
"""
Use boolean mask as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by('a')
t2 = t1.groups[np.array([True, False, True])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
def test_table_groups_array_index(T1):
"""
Use numpy array as item in __getitem__ for groups
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by('a')
t2 = t1.groups[np.array([0, 2])]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
def test_table_groups_slicing(T1):
"""
Test that slicing table groups works
"""
for masked in (False, True):
t1 = Table(T1, masked=masked).group_by('a')
# slice(0, 2)
t2 = t1.groups[0:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 1]))
# slice(1, 2)
t2 = t1.groups[1:2]
assert len(t2.groups) == 1
assert t2.groups[0].pformat() == t1.groups[1].pformat()
assert np.all(t2.groups.keys['a'] == np.array([1]))
# slice(0, 3, 2)
t2 = t1.groups[0:3:2]
assert len(t2.groups) == 2
assert t2.groups[0].pformat() == t1.groups[0].pformat()
assert t2.groups[1].pformat() == t1.groups[2].pformat()
assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
def test_grouped_item_access(T1):
"""
Test that column slicing preserves grouping
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# Regular slice of a table
tg = t1.group_by('a')
tgs = tg['a', 'c', 'd']
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
tgs = tg['c', 'd']
assert np.all(tgs.groups.keys == tg.groups.keys)
assert np.all(tgs.groups.indices == tg.groups.indices)
tgsa = tgs.groups.aggregate(np.sum)
assert tgsa.pformat() == [' c d ',
'---- ---',
' 0.0 4',
' 6.0 18',
'22.0 6']
def test_mutable_operations(T1):
"""
    Operations like adding or deleting a row should remove grouping,
    but adding, removing, or renaming a column should retain grouping.
"""
for masked in (False, True):
t1 = Table(T1, masked=masked)
# add row
tg = t1.group_by('a')
tg.add_row((0, 'a', 3.0, 4))
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# remove row
tg = t1.group_by('a')
tg.remove_row(4)
assert np.all(tg.groups.indices == np.array([0, len(tg)]))
assert tg.groups.keys is None
# add column
tg = t1.group_by('a')
indices = tg.groups.indices.copy()
tg.add_column(Column(name='e', data=np.arange(len(tg))))
assert np.all(tg.groups.indices == indices)
assert np.all(tg['e'].groups.indices == indices)
assert np.all(tg['e'].groups.keys == tg.groups.keys)
# remove column (not key column)
tg = t1.group_by('a')
tg.remove_column('b')
assert np.all(tg.groups.indices == indices)
# Still has original key col names
assert tg.groups.keys.dtype.names == ('a',)
assert np.all(tg['a'].groups.indices == indices)
# remove key column
tg = t1.group_by('a')
tg.remove_column('a')
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ('a',)
assert np.all(tg['b'].groups.indices == indices)
# rename key column
tg = t1.group_by('a')
tg.rename_column('a', 'aa')
assert np.all(tg.groups.indices == indices)
assert tg.groups.keys.dtype.names == ('a',)
assert np.all(tg['aa'].groups.indices == indices)
def test_group_by_masked(T1):
t1m = Table(T1, masked=True)
t1m['c'].mask[4] = True
t1m['d'].mask[5] = True
assert t1m.group_by('a').pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a -- 4',
' 1 b 3.0 --',
' 1 a 2.0 6',
' 1 a 1.0 7',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3']
def test_group_by_errors(T1):
"""
Appropriate errors get raised.
"""
# Bad column name as string
with pytest.raises(ValueError):
T1.group_by('f')
# Bad column names in list
with pytest.raises(ValueError):
T1.group_by(['f', 'g'])
# Wrong length array
with pytest.raises(ValueError):
T1.group_by(np.array([1, 2]))
# Wrong type
with pytest.raises(TypeError):
T1.group_by(None)
# Masked key column
t1 = Table(T1, masked=True)
t1['a'].mask[4] = True
with pytest.raises(ValueError):
t1.group_by('a')
def test_groups_keys_meta(T1):
"""
Make sure the keys meta['grouped_by_table_cols'] is working.
"""
# Group by column in this table
tg = T1.group_by('a')
assert tg.groups.keys.meta['grouped_by_table_cols'] is True
assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True
assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True
assert (tg['d'].groups[np.array([False, True, True])]
.groups.keys.meta['grouped_by_table_cols'] is True)
# Group by external Table
tg = T1.group_by(T1['a', 'b'])
assert tg.groups.keys.meta['grouped_by_table_cols'] is False
assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False
assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False
# Group by external numpy array
tg = T1.group_by(T1['a', 'b'].as_array())
assert not hasattr(tg.groups.keys, 'meta')
assert not hasattr(tg['c'].groups.keys, 'meta')
# Group by Column
tg = T1.group_by(T1['a'])
assert 'grouped_by_table_cols' not in tg.groups.keys.meta
assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta
def test_table_aggregate(T1):
"""
Aggregate a table
"""
# Table with only summable cols
t1 = T1['a', 'c', 'd']
tg = t1.group_by('a')
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
# Reverts to default groups
assert np.all(tga.groups.indices == np.array([0, 3]))
assert tga.groups.keys is None
# metadata survives
assert tga.meta['ta'] == 1
assert tga['c'].meta['a'] == 1
assert tga['c'].description == 'column c'
# Aggregate with np.sum with masked elements. This results
# in one group with no elements, hence a nan result and conversion
# to float for the 'd' column.
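    # Here rows 4 and 5 get masked, so the a == 0 group (row 4 only) has no
    # unmasked values left; its np.sum result is masked, converted to nan, and
    # the integer 'd' column is therefore upcast to float in the output.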
t1m = Table(t1, masked=True)
t1m['c'].mask[4:6] = True
t1m['d'].mask[4:6] = True
tg = t1m.group_by('a')
with catch_warnings(Warning) as warning_lines:
tga = tg.groups.aggregate(np.sum)
assert warning_lines[0].category == UserWarning
assert "converting a masked element to nan" in str(warning_lines[0].message)
assert tga.pformat() == [' a c d ',
'--- ---- ----',
' 0 nan nan',
' 1 3.0 13.0',
' 2 22.0 6.0']
    # Aggregate with np.sum with masked elements, but where every
# group has at least one remaining (unmasked) element. Then
# the int column stays as an int.
t1m = Table(t1, masked=True)
t1m['c'].mask[5] = True
t1m['d'].mask[5] = True
tg = t1m.group_by('a')
tga = tg.groups.aggregate(np.sum)
assert tga.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 3.0 13',
' 2 22.0 6']
    # Aggregate with a column type that cannot be supplied to the aggregating
# function. This raises a warning but still works.
tg = T1.group_by('a')
with catch_warnings(Warning) as warning_lines:
tga = tg.groups.aggregate(np.sum)
assert warning_lines[0].category == AstropyUserWarning
assert "Cannot aggregate column" in str(warning_lines[0].message)
assert tga.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
def test_table_aggregate_reduceat(T1):
"""
Aggregate table with functions which have a reduceat method
"""
# Comparison functions without reduceat
def np_mean(x):
return np.mean(x)
def np_sum(x):
return np.sum(x)
    def np_add(x, y):
        return np.add(x, y)
# Table with only summable cols
t1 = T1['a', 'c', 'd']
tg = t1.group_by('a')
# Comparison
tga_r = tg.groups.aggregate(np.sum)
tga_a = tg.groups.aggregate(np.add)
tga_n = tg.groups.aggregate(np_sum)
assert np.all(tga_r == tga_n)
assert np.all(tga_a == tga_n)
assert tga_n.pformat() == [' a c d ',
'--- ---- ---',
' 0 0.0 4',
' 1 6.0 18',
' 2 22.0 6']
tga_r = tg.groups.aggregate(np.mean)
tga_n = tg.groups.aggregate(np_mean)
assert np.all(tga_r == tga_n)
assert tga_n.pformat() == [' a c d ',
'--- --- ---',
' 0 0.0 4.0',
' 1 2.0 6.0',
' 2 5.5 1.5']
# Binary ufunc np_add should raise warning without reduceat
t2 = T1['a', 'c']
tg = t2.group_by('a')
with catch_warnings(Warning) as warning_lines:
tga = tg.groups.aggregate(np_add)
assert warning_lines[0].category == AstropyUserWarning
assert "Cannot aggregate column" in str(warning_lines[0].message)
assert tga.pformat() == [' a ',
'---',
' 0',
' 1',
' 2']
def test_column_aggregate(T1):
"""
Aggregate a single table column
"""
for masked in (False, True):
tg = Table(T1, masked=masked).group_by('a')
tga = tg['c'].groups.aggregate(np.sum)
assert tga.pformat() == [' c ',
'----',
' 0.0',
' 6.0',
'22.0']
def test_table_filter():
"""
Table groups filtering
"""
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
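    # Note: table-level groups.filter() calls the predicate as
    # func(group_table, key_colnames), which is why all_positive above can
    # exclude the key columns from the check.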
# Negative value in 'a' column should not filter because it is a key col
t = Table.read([' a c d',
' -2 7.0 0',
' -2 5.0 1',
' 0 0.0 4',
' 1 3.0 5',
' 1 2.0 -6',
' 1 1.0 7',
' 3 3.0 5',
' 3 -2.0 6',
' 3 1.0 7',
], format='ascii')
tg = t.group_by('a')
t2 = tg.groups.filter(all_positive)
assert t2.groups[0].pformat() == [' a c d ',
'--- --- ---',
' -2 7.0 0',
' -2 5.0 1']
assert t2.groups[1].pformat() == [' a c d ',
'--- --- ---',
' 0 0.0 4']
def test_column_filter():
"""
Table groups filtering
"""
def all_positive(column):
if np.any(column < 0):
return False
return True
# Negative value in 'a' column should not filter because it is a key col
t = Table.read([' a c d',
' -2 7.0 0',
' -2 5.0 1',
' 0 0.0 4',
' 1 3.0 5',
' 1 2.0 -6',
' 1 1.0 7',
' 3 3.0 5',
' 3 -2.0 6',
' 3 1.0 7',
], format='ascii')
tg = t.group_by('a')
c2 = tg['c'].groups.filter(all_positive)
assert len(c2.groups) == 3
assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0']
assert c2.groups[1].pformat() == [' c ', '---', '0.0']
assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0']
def test_group_mixins():
"""
Test grouping a table with mixin columns
"""
# Setup mixins
idx = np.arange(4)
x = np.array([3., 1., 2., 1.])
q = x * u.m
lon = coordinates.Longitude(x * u.deg)
lat = coordinates.Latitude(x * u.deg)
# For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
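    # (Time keeps its value as two float64 parts internally, so these
    # sub-nanosecond offsets survive where a single float64 would round.)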
tm = time.Time(2000, format='jyear') + time.TimeDelta(x * 1e-10, format='sec')
sc = coordinates.SkyCoord(ra=lon, dec=lat)
aw = table_helpers.ArrayWrapper(x)
nd = np.array([(3, 'c'), (1, 'a'), (2, 'b'), (1, 'a')],
dtype='<i4,|S1').view(NdarrayMixin)
qt = QTable([idx, x, q, lon, lat, tm, sc, aw, nd],
names=['idx', 'x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd'])
# Test group_by with each supported mixin type
mixin_keys = ['x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd']
for key in mixin_keys:
qtg = qt.group_by(key)
# Test that it got the sort order correct
assert np.all(qtg['idx'] == [1, 3, 2, 0])
# Test that the groups are right
# Note: skip testing SkyCoord column because that doesn't have equality
for name in ['x', 'q', 'lon', 'lat', 'tm', 'aw', 'nd']:
assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
assert np.all(qt[name][[2]] == qtg.groups[1][name])
assert np.all(qt[name][[0]] == qtg.groups[2][name])
# Test that unique also works with mixins since most of the work is
# done with group_by(). This is using *every* mixin as key.
uqt = unique(qt, keys=mixin_keys)
assert len(uqt) == 3
assert np.all(uqt['idx'] == [1, 2, 0])
assert np.all(uqt['x'] == [1., 2., 3.])
# Column group_by() with mixins
idxg = qt['idx'].group_by(qt[mixin_keys])
assert np.all(idxg == [1, 3, 2, 0])
|
4a84b4991bcaa734f6c3bc6009622c615b3d2a2f4e17430d3686032120e23dfe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test behavior related to masked tables"""
import pytest
import numpy as np
import numpy.ma as ma
from ...table import Column, MaskedColumn, Table
class SetupData:
def setup_method(self, method):
self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=1)
self.b = MaskedColumn(name='b', data=[4, 5, 6], mask=True)
self.c = MaskedColumn(name='c', data=[7, 8, 9], mask=False)
self.d_mask = np.array([False, True, False])
self.d = MaskedColumn(name='d', data=[7, 8, 7], mask=self.d_mask)
self.t = Table([self.a, self.b], masked=True)
self.ca = Column(name='ca', data=[1, 2, 3])
class TestPprint(SetupData):
def test_pformat(self):
assert self.t.pformat() == [' a b ', '--- ---', ' 1 --', ' 2 --', ' 3 --']
class TestFilled:
"""Test the filled method in MaskedColumn and Table"""
def setup_method(self, method):
mask = [True, False, False]
self.meta = {'a': 1, 'b': [2, 3]}
        self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=10, mask=mask, meta={'a': 1})
        self.b = MaskedColumn(name='b', data=[4.0, 5.0, 6.0], fill_value=10.0, mask=mask)
        self.c = MaskedColumn(name='c', data=['7', '8', '9'], fill_value='1', mask=mask)
def test_filled_column(self):
f = self.a.filled()
assert np.all(f == [10, 2, 3])
assert isinstance(f, Column)
assert not isinstance(f, MaskedColumn)
# Confirm copy, not ref
assert f.meta['a'] == 1
f.meta['a'] = 2
f[1] = 100
assert self.a[1] == 2
assert self.a.meta['a'] == 1
# Fill with arg fill_value not column fill_value
f = self.a.filled(20)
assert np.all(f == [20, 2, 3])
f = self.b.filled()
assert np.all(f == [10.0, 5.0, 6.0])
assert isinstance(f, Column)
f = self.c.filled()
assert np.all(f == ['1', '8', '9'])
assert isinstance(f, Column)
def test_filled_masked_table(self, tableclass):
t = tableclass([self.a, self.b, self.c], meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f['a'] == [10, 2, 3])
assert np.allclose(f['b'], [10.0, 5.0, 6.0])
assert np.all(f['c'] == ['1', '8', '9'])
# Confirm copy, not ref
assert f.meta['b'] == [2, 3]
f.meta['b'][0] = 20
assert t.meta['b'] == [2, 3]
f['a'][2] = 100
assert t['a'][2] == 3
def test_filled_unmasked_table(self, tableclass):
t = tableclass([(1, 2), ('3', '4')], names=('a', 'b'), meta=self.meta)
f = t.filled()
assert isinstance(f, Table)
assert f.masked is False
assert np.all(f['a'] == t['a'])
assert np.all(f['b'] == t['b'])
# Confirm copy, not ref
assert f.meta['b'] == [2, 3]
f.meta['b'][0] = 20
assert t.meta['b'] == [2, 3]
f['a'][1] = 100
assert t['a'][1] == 2
class TestFillValue(SetupData):
"""Test setting and getting fill value in MaskedColumn and Table"""
def test_init_set_fill_value(self):
"""Check that setting fill_value in the MaskedColumn init works"""
assert self.a.fill_value == 1
c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], fill_value='none')
assert c.fill_value == 'none'
def test_set_get_fill_value_for_bare_column(self):
"""Check set and get of fill value works for bare Column"""
self.d.fill_value = -999
assert self.d.fill_value == -999
assert np.all(self.d.filled() == [7, -999, 7])
def test_set_get_fill_value_for_str_column(self):
c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], mask=[True, False])
# assert np.all(c.filled() == ['N/A', 'yyyy'])
c.fill_value = 'ABCDEF'
assert c.fill_value == 'ABCD' # string truncated to dtype length
assert np.all(c.filled() == ['ABCD', 'yyyy'])
assert np.all(c.filled('XY') == ['XY', 'yyyy'])
def test_table_column_mask_not_ref(self):
"""Table column mask is not ref of original column mask"""
self.b.fill_value = -999
assert self.t['b'].fill_value != -999
def test_set_get_fill_value_for_table_column(self):
"""Check set and get of fill value works for Column in a Table"""
self.t['b'].fill_value = 1
assert self.t['b'].fill_value == 1
assert np.all(self.t['b'].filled() == [1, 1, 1])
def test_data_attribute_fill_and_mask(self):
"""Check that .data attribute preserves fill_value and mask"""
self.t['b'].fill_value = 1
self.t['b'].mask = [True, False, True]
assert self.t['b'].data.fill_value == 1
assert np.all(self.t['b'].data.mask == [True, False, True])
class TestMaskedColumnInit(SetupData):
"""Initialization of a masked column"""
def test_set_mask_and_not_ref(self):
"""Check that mask gets set properly and that it is a copy, not ref"""
assert np.all(~self.a.mask)
assert np.all(self.b.mask)
assert np.all(~self.c.mask)
assert np.all(self.d.mask == self.d_mask)
self.d.mask[0] = True
assert not np.all(self.d.mask == self.d_mask)
def test_set_mask_from_list(self):
"""Set mask from a list"""
mask_list = [False, True, False]
a = MaskedColumn(name='a', data=[1, 2, 3], mask=mask_list)
assert np.all(a.mask == mask_list)
def test_override_existing_mask(self):
"""Override existing mask values"""
mask_list = [False, True, False]
b = MaskedColumn(name='b', data=self.b, mask=mask_list)
assert np.all(b.mask == mask_list)
def test_incomplete_mask_spec(self):
"""Incomplete mask specification raises MaskError"""
mask_list = [False, True]
with pytest.raises(ma.MaskError):
MaskedColumn(name='b', length=4, mask=mask_list)
class TestTableInit(SetupData):
"""Initializing a table"""
def test_mask_true_if_any_input_masked(self):
"""Masking is True if any input is masked"""
t = Table([self.ca, self.a])
assert t.masked is True
t = Table([self.ca])
assert t.masked is False
t = Table([self.ca, ma.array([1, 2, 3])])
assert t.masked is True
def test_mask_false_if_no_input_masked(self):
"""Masking not true if not (requested or input requires mask)"""
t0 = Table([[3, 4]], masked=False)
t1 = Table(t0, masked=True)
t2 = Table(t1, masked=False)
assert not t0.masked
assert t1.masked
assert not t2.masked
def test_mask_property(self):
t = self.t
# Access table mask (boolean structured array) by column name
assert np.all(t.mask['a'] == np.array([False, False, False]))
assert np.all(t.mask['b'] == np.array([True, True, True]))
# Check that setting mask from table mask has the desired effect on column
t.mask['b'] = np.array([False, True, False])
assert np.all(t['b'].mask == np.array([False, True, False]))
# Non-masked table returns None for mask attribute
t2 = Table([self.ca], masked=False)
assert t2.mask is None
# Set mask property globally and verify local correctness
for mask in (True, False):
t.mask = mask
for name in ('a', 'b'):
assert np.all(t[name].mask == mask)
class TestAddColumn:
def test_add_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
assert t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_column_to_non_masked_table(self):
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_non_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_convert_to_masked_table_only_if_necessary(self):
# Do not convert to masked table, if new column has no masked value.
# See #1185 for details.
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name='a', data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[0, 0, 0]))
assert not t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([4, 5, 6]))
class TestRenameColumn:
def test_rename_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
t['a'].fill_value = 42
t.rename_column('a', 'b')
assert t.masked
assert np.all(t['b'] == np.array([1, 2, 3]))
assert np.all(t['b'].mask == np.array([0, 1, 0], bool))
assert t['b'].fill_value == 42
assert t.colnames == ['b']
class TestRemoveColumn:
def test_remove_masked_column(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0]))
t['a'].fill_value = 42
t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1]))
t.remove_column('b')
assert t.masked
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['a'].fill_value == 42
assert t.colnames == ['a']
class TestAddRow:
def test_add_masked_row_to_masked_table_iterable(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row([2, 5], mask=[1, 0])
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping1(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0})
t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0})
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping2(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then values not specified will have mask values set to True
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5}, mask={'b': 0})
t.add_row({'a': 3}, mask={'a': 0})
assert t.masked
assert t['a'][0] == 1 and t['a'][2] == 3
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['b'][1] == 5
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping3(self):
# When adding values to a masked table, if mask is not passed to
# add_row, then the mask should be set to False if values are present
# and True if not.
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
t.add_row({'b': 5})
t.add_row({'a': 3})
assert t.masked
assert t['a'][0] == 1 and t['a'][2] == 3
assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
assert t['b'][1] == 5
assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
def test_add_masked_row_to_masked_table_mapping4(self):
# When adding values to a masked table, if the mask is specified as a
# dict, then keys in values should match keys in mask
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
with pytest.raises(ValueError) as exc:
t.add_row({'b': 5}, mask={'a': True})
assert exc.value.args[0] == 'keys in mask should match keys in vals'
def test_add_masked_row_to_masked_table_mismatch(self):
t = Table(masked=True)
t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
with pytest.raises(TypeError) as exc:
t.add_row([2, 5], mask={'a': 1, 'b': 0})
assert exc.value.args[0] == "Mismatch between type of vals and mask"
with pytest.raises(TypeError) as exc:
t.add_row({'b': 5, 'a': 2}, mask=[1, 0])
assert exc.value.args[0] == "Mismatch between type of vals and mask"
def test_add_masked_row_to_non_masked_table_iterable(self):
t = Table(masked=False)
t.add_column(Column(name='a', data=[1]))
t.add_column(Column(name='b', data=[4]))
assert not t.masked
t.add_row([2, 5])
assert not t.masked
t.add_row([3, 6], mask=[0, 1])
assert t.masked
assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
assert np.all(t['a'].mask == np.array([0, 0, 0], bool))
assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
assert np.all(t['b'].mask == np.array([0, 0, 1], bool))
def test_setting_from_masked_column():
"""Test issue in #2997"""
mask_b = np.array([True, True, False, False])
for select in (mask_b, slice(0, 2)):
t = Table(masked=True)
t['a'] = Column([1, 2, 3, 4])
t['b'] = MaskedColumn([11, 22, 33, 44], mask=mask_b)
t['c'] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False])
t['b'][select] = t['c'][select]
assert t['b'][1] == t[1]['b']
assert t['b'][0] is np.ma.masked # Original state since t['c'][0] is masked
assert t['b'][1] == 222 # New from t['c'] since t['c'][1] is unmasked
assert t['b'][2] == 33
assert t['b'][3] == 44
assert np.all(t['b'].mask == t.mask['b']) # Avoid t.mask in general, this is for testing
mask_before_add = t.mask.copy()
t['d'] = np.arange(len(t))
assert np.all(t.mask['b'] == mask_before_add['b'])
def test_coercing_fill_value_type():
"""
Test that masked column fill_value is coerced into the correct column type.
"""
# This is the original example posted on the astropy@scipy mailing list
t = Table({'a': ['1']}, masked=True)
t['a'].set_fill_value('0')
t2 = Table(t, names=['a'], dtype=[np.int32])
assert isinstance(t2['a'].fill_value, np.int32)
# Unit test the same thing.
c = MaskedColumn(['1'])
c.set_fill_value('0')
c2 = MaskedColumn(c, dtype=np.int32)
assert isinstance(c2.fill_value, np.int32)
def test_mask_copy():
"""Test that the mask is copied when copying a table (issue #7362)."""
c = MaskedColumn([1, 2], mask=[False, True])
c2 = MaskedColumn(c, copy=True)
c2.mask[0] = True
assert np.all(c.mask == [False, True])
assert np.all(c2.mask == [True, True])
|
4cacf0bbe0645cdec4cc618cb9c6134a2bb8f6eaf4cc811f3254de01cc5b966e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import pytest
import numpy as np
from ...tests.helper import catch_warnings
from ...table import Table, QTable, TableMergeError
from ...table.operations import _get_out_class
from ... import units as u
from ...utils import metadata
from ...utils.metadata import MergeConflictError
from ... import table
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
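# Helper comparing SkyCoord objects component by component, since SkyCoord
# does not implement __eq__ (see also the mixin tests below).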
def skycoord_equal(sc1, sc2):
if not sc1.is_equivalent_frame(sc2):
return False
if sc1.representation is not sc2.representation: # Will be representation_type..
return False
if sc1.shape != sc2.shape:
return False # Maybe raise ValueError corresponding to future numpy behavior
eq = np.ones(shape=sc1.shape, dtype=bool)
for comp in sc1.data.components:
eq &= getattr(sc1.data, comp) == getattr(sc2.data, comp)
return np.all(eq)
class TestJoin():
def _setup(self, t_cls=Table):
lines1 = [' a b c ',
' 0 foo L1',
' 1 foo L2',
' 1 bar L3',
' 2 bar L4']
lines2 = [' a b d ',
' 1 foo R1',
' 1 foo R2',
' 2 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls(self.t2, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),
('c', {'a': 1, 'b': 1}),
('d', 1),
('a', 1)])
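        # Merge semantics encoded in meta_merge above: list values
        # concatenate, dict values merge key-wise, unique scalars pass
        # through, and conflicting scalars take the last table's value
        # (warning unless metadata_conflicts='silent').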
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.join(self.t1, self.t2, join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner')
assert len(w) == 3
assert out.meta == self.t3.meta
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')
assert len(w) == 3
assert out.meta == self.t3.meta
with catch_warnings() as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert type(t12['c']) is type(t1['c'])
assert type(t12['d']) is type(t2['d'])
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Right join
t12 = table.join(t1, t2, join_type='right')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type='outer')
t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys='a')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b_1']) is type(t1['b'])
assert type(t12['c']) is type(t1['c'])
assert type(t12['b_2']) is type(t2['b'])
assert type(t12['d']) is type(t2['d'])
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left', keys='a')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
# Right join
t12 = table.join(t1, t2, join_type='right', keys='a')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer', keys='a')
assert t12.masked is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result should be masked even though not req'd by inner join
t1m2 = table.join(t1m, t2, join_type='inner')
assert t1m2.masked is True
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar R3'])
t21m = table.join(t2, t1m, join_type='inner', keys='a')
assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',
'--- --- --- --- ---',
' 1 foo R2 -- L2',
' 1 foo R2 bar --',
' 1 foo R1 -- L2',
' 1 foo R1 bar --',
' 2 bar R3 bar L4'])
    def test_masked_masked(self, operation_table_type):
        """Two masked tables"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result should be masked even though not req'd by inner join
t1m2m = table.join(t1m, t2m, join_type='inner')
assert t1m2m.masked is True
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t2m['d'].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar --'])
    def test_col_rename(self, operation_table_type):
        """
        Test auto col renaming when there is a conflict. Use
        non-default values of uniq_col_name and table_names.
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',
table_names=['L', 'R'], keys='a')
assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']
    def test_rename_conflict(self, operation_table_type):
        """
        Test that auto-column rename fails because of a conflict
        with an existing column
        """
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys='a')
    def test_missing_keys(self, operation_table_type):
        """Merge on a key column that doesn't exist"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=['a', 'not there'])
    def test_bad_join_type(self, operation_table_type):
        """Bad join_type input"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type='illegal value')
    def test_no_common_keys(self, operation_table_type):
        """Merge tables with no common keys"""
        self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
del t1['a']
del t1['b']
del t2['a']
del t2['b']
with pytest.raises(TableMergeError):
table.join(t1, t2)
    def test_masked_key_column(self, operation_table_type):
        """Merge on a key column that has a masked element"""
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2['a'].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t2.rename_column('d', 'c') # force col conflict and renaming
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
        # Key col 'a', should take last value ('m')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
        # Key col 'b', take first non-empty value '%6s'
t2['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta = meta1
t2['a'].info.meta = meta2
# Key col 'b', should be meta2
t2['b'].info.meta = meta2
# All these should pass through
t1['c'].info.format = '%3s'
t1['c'].info.description = 't1_c'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
t12 = table.join(t1, t2, keys=['a', 'b'])
if operation_table_type is Table:
assert warning_lines[0].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
else:
assert len(warning_lines) == 0
assert t12['a'].unit == 'm'
assert t12['b'].info.description == 't1_b'
assert t12['b'].info.format == '%6s'
assert t12['a'].info.meta == self.meta_merge
assert t12['b'].info.meta == meta2
assert t12['c_1'].info.format == '%3s'
assert t12['c_1'].info.description == 't1_c'
assert t12['c_2'].info.format == '%6s'
assert t12['c_2'].info.description == 't2_c'
def test_join_multidimensional(self, operation_table_type):
self._setup(operation_table_type)
# Regression test for #2984, which was an issue where join did not work
# on multi-dimensional columns.
t1 = operation_table_type()
t1['a'] = [1, 2, 3]
t1['b'] = np.ones((3, 4))
t2 = operation_table_type()
t2['a'] = [1, 2, 3]
t2['c'] = [4, 5, 6]
t3 = table.join(t1, t2)
np.testing.assert_allclose(t3['a'], t1['a'])
np.testing.assert_allclose(t3['b'], t1['b'])
np.testing.assert_allclose(t3['c'], t2['c'])
    def test_join_multidimensional_masked(self, operation_table_type):
        """
        Test for outer join with multidimensional columns where masking is required.
        (Issue #4059).
        """
        self._setup(operation_table_type)
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
a = table.MaskedColumn([1, 2, 3], name='a')
a2 = table.Column([1, 3, 4], name='a')
b = table.MaskedColumn([[1, 2],
[3, 4],
[5, 6]],
name='b',
mask=[[1, 0],
[0, 1],
[0, 0]])
c = table.Column([[1, 1],
[2, 2],
[3, 3]],
name='c')
t1 = operation_table_type([a, b])
t2 = operation_table_type([a2, c])
t12 = table.join(t1, t2, join_type='inner')
assert np.all(t12['b'].mask == [[True, False],
[False, False]])
assert np.all(t12['c'].mask == [[False, False],
[False, False]])
t12 = table.join(t1, t2, join_type='outer')
assert np.all(t12['b'].mask == [[True, False],
[False, True],
[False, False],
[True, True]])
assert np.all(t12['c'].mask == [[False, False],
[True, True],
[False, False],
[False, False]])
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=['idx', 'm1'])
t2 = table.QTable([idx, col], names=['idx', 'm2'])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type='inner')
assert len(out) == 2
assert out['m2'].__class__ is col.__class__
assert np.all(out['idx'] == [0, 3])
if cls_name == 'SkyCoord':
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out['m1'], col[[0, 3]])
assert skycoord_equal(out['m2'], col[[0, 3]])
else:
assert np.all(out['m1'] == col[[0, 3]])
assert np.all(out['m2'] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Only Time
# supports this currently.
if cls_name == 'Time':
out = table.join(t1, t2, join_type='left')
assert len(out) == 3
assert np.all(out['idx'] == [0, 1, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, False, False])
assert np.all(out['m2'].mask == [False, True, False])
out = table.join(t1, t2, join_type='right')
assert len(out) == 3
assert np.all(out['idx'] == [0, 2, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, True, False])
assert np.all(out['m2'].mask == [False, False, False])
out = table.join(t1, t2, join_type='outer')
assert len(out) == 4
assert np.all(out['idx'] == [0, 1, 2, 3])
assert np.all(out['m1'] == col)
assert np.all(out['m2'] == col)
assert np.all(out['m1'].mask == [False, False, True, False])
assert np.all(out['m2'].mask == [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ('outer', 'left', 'right'):
with pytest.raises(NotImplementedError) as err:
                    table.join(t1, t2, join_type=join_type)
assert ('join requires masking' in str(err) or
'join unavailable' in str(err))
class TestSetdiff():
def _setup(self, t_cls=Table):
lines1 = [' a b ',
' 0 foo ',
' 1 foo ',
' 1 bar ',
' 2 bar ']
lines2 = [' a b ',
' 0 foo ',
' 3 foo ',
' 4 bar ',
' 2 bar ']
lines3 = [' a b d ',
' 0 foo R1',
' 8 foo R2',
' 1 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls.read(lines3, format='ascii')
def test_default_same_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t2)
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---',
' 1 bar',
' 1 foo']
def test_default_same_tables(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t1)
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---']
def test_extra_col_left_table(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
out = table.setdiff(self.t3, self.t1)
def test_extra_col_right_table(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t3)
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---',
' 1 foo',
' 2 bar']
def test_keys(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t3, self.t1, keys=['a', 'b'])
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b d ',
'--- --- ---',
' 4 bar R4',
' 8 foo R2']
def test_missing_key(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
out = table.setdiff(self.t3, self.t1, keys=['a', 'd'])
class TestVStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' a b',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2[1]])
assert type(out['a']) is type(self.t1['a'])
assert type(out['b']) is type(self.t1['b'])
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'1.0 bar']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.vstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with catch_warnings() as w:
out = table.vstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TypeError):
table.vstack([])
with pytest.raises(TypeError):
table.vstack(1)
with pytest.raises(TypeError):
table.vstack([self.t2, 1])
with pytest.raises(ValueError):
table.vstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='inner')
assert t12.masked is False
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert t12.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez']
t124 = table.vstack([t1, t2, t4], join_type='inner')
assert type(t124) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert t124.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez',
'0.0 foo',
'1.0 bar']
def test_stack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='outer')
assert t12.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5']
t124 = table.vstack([t1, t2, t4], join_type='outer')
assert t124.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5',
'0.0 foo --',
'1.0 bar --']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='inner')
assert ("The 'b' columns have incompatible types: {0}"
.format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])
in str(excinfo))
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='outer')
assert "The 'b' columns have incompatible types:" in str(excinfo)
with pytest.raises(TableMergeError):
table.vstack([self.t1, self.t2], join_type='exact')
t1_reshape = self.t1.copy()
t1_reshape['b'].shape = [2, 1]
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, t1_reshape])
assert "have different shape" in str(excinfo)
def test_vstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t4 = self.t4
t4['b'].mask[1] = True
assert table.vstack([t1, t4]).pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 foo',
'1.0 --']
def test_col_meta_merge_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1['a'].info.unit = 'cm'
t2['a'].info.unit = 'm'
t4['a'].info.unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%f'
t2['a'].info.format = '%f'
t4['a'].info.format = '%f'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type='inner')
if operation_table_type is Table:
assert warning_lines[0].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert warning_lines[1].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
# Check units are suitably ignored for a regular Table
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'1.000000 bar',
'2.000000 pez',
'3.000000 sez',
'0.000000 foo',
'1.000000 bar']
else:
assert len(warning_lines) == 0
# Check QTable correctly dealt with units.
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'0.000010 bar',
'0.002000 pez',
'0.003000 sez',
'0.000000 foo',
'1.000000 bar']
assert out['a'].info.unit == 'km'
assert out['a'].info.format == '%f'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
def test_col_meta_merge_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
        # Key col 'a', should take last value ('km')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
t4['a'].unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%0d'
t2['a'].info.format = '%0d'
t4['a'].info.format = '%0d'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
# All these should pass through
t2['c'].unit = 'm'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type='outer')
assert warning_lines[0].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert warning_lines[1].category == metadata.MergeConflictWarning
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
assert out['a'].unit == 'km'
assert out['a'].info.format == '%0d'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
assert out['c'].info.unit == 'm'
assert out['c'].info.format == '%6s'
assert out['c'].info.description == 't2_c'
    def test_vstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.vstack(self.t1)).all()
assert (self.t1 == table.vstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
len_col = len(col)
t = table.QTable([col], names=['a'])
cls_name = type(col).__name__
# Vstack works for these classes:
implemented_mixin_classes = ['Quantity', 'Angle', 'Time',
'Latitude', 'Longitude',
'EarthLocation']
if cls_name in implemented_mixin_classes:
out = table.vstack([t, t])
assert len(out) == len_col * 2
assert np.all(out['a'][:len_col] == col)
assert np.all(out['a'][len_col:] == col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t])
assert ('vstack unavailable for mixin column type(s): {}'
.format(cls_name) in str(err))
# Check for outer stack which requires masking. Only Time supports
# this currently.
t2 = table.QTable([col], names=['b']) # different from col name for t
if cls_name == 'Time':
out = table.vstack([t, t2], join_type='outer')
assert len(out) == len_col * 2
assert np.all(out['a'][:len_col] == col)
assert np.all(out['b'][len_col:] == col)
assert np.all(out['a'].mask == [False] * len_col + [True] * len_col)
assert np.all(out['b'].mask == [True] * len_col + [False] * len_col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t2], join_type='outer')
assert ('vstack requires masking' in str(err) or
'vstack unavailable' in str(err))
class TestHStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' d e',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
self.t4 = t_cls(self.t1, copy=True, masked=True)
self.t4['a'].name = 'f'
self.t4['b'].name = 'g'
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_stack_same_table(self, operation_table_type):
"""
From #2995, test that hstack'ing references to the same table has the
expected output.
"""
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t1])
assert out.pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 bar']
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1[0], self.t2[1]])
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 3.0 sez 5']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with catch_warnings() as w:
out = table.hstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
with catch_warnings() as w:
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert len(w) == 0
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TypeError):
table.hstack([])
with pytest.raises(TypeError):
table.hstack(1)
with pytest.raises(TypeError):
table.hstack([self.t2, 1])
with pytest.raises(ValueError):
table.hstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t3 = self.t3
t4 = self.t4
out = table.hstack([t1, t2], join_type='inner')
assert out.masked is False
assert type(out) is operation_table_type
assert type(out['a_1']) is type(t1['a'])
assert type(out['b_1']) is type(t1['b'])
assert type(out['a_2']) is type(t2['a'])
assert type(out['b_2']) is type(t2['b'])
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 2.0 pez 4',
'1.0 bar 3.0 sez 5']
# stacking as a list gives same result
out_list = table.hstack([t1, t2], join_type='inner')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2], join_type='outer')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2, t3, t4], join_type='outer')
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar',
' -- -- -- -- -- 6.0 9 -- --']
out = table.hstack([t1, t2, t3, t4], join_type='inner')
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
# For join_type exact, which will fail here because n_rows
# does not match
with pytest.raises(TableMergeError):
table.hstack([self.t1, self.t3], join_type='exact')
def test_hstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = operation_table_type(t1, copy=True, masked=True)
t2.meta.clear()
t2['b'].mask[1] = True
assert table.hstack([t1, t2]).pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 --']
def test_table_col_rename(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2], join_type='inner',
uniq_col_name='{table_name}_{col_name}',
table_names=('left', 'right'))
assert out.masked is False
assert out.pformat() == ['left_a left_b right_a right_b c ',
'------ ------ ------- ------- ---',
' 0.0 foo 2.0 pez 4',
' 1.0 bar 3.0 sez 5']
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t3 = self.t3[:2]
t4 = self.t4
# Just set a bunch of meta and make sure it is the same in output
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
t1['a'].unit = 'cm'
t1['b'].info.description = 't1_b'
t4['f'].info.format = '%6s'
t1['b'].info.meta.update(meta1)
t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t3['d'].unit = 'm'
t3['d'].info.format = '%6s'
t3['d'].info.description = 't3_c'
with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
out = table.hstack([t1, t3, t4], join_type='exact')
assert len(warning_lines) == 0
for t in [t1, t3, t4]:
for name in t.colnames:
for attr in ('meta', 'unit', 'format', 'description'):
assert getattr(out[name].info, attr) == getattr(t[name].info, attr)
# Make sure we got a copy of meta, not ref
t1['b'].info.meta['b'] = None
assert out['b'].info.meta['b'] == [1, 2]
    def test_hstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        self._setup(operation_table_type)
assert (self.t1 == table.hstack(self.t1)).all()
assert (self.t1 == table.hstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col1 = mixin_cols['m']
col2 = col1[2:4] # Shorter version of col1
t1 = table.QTable([col1])
t2 = table.QTable([col2])
cls_name = type(col1).__name__
out = table.hstack([t1, t2], join_type='inner')
assert type(out['col0_1']) is type(out['col0_2'])
assert len(out) == len(col2)
# Check that columns are as expected.
if cls_name == 'SkyCoord':
assert skycoord_equal(out['col0_1'], col1[:len(col2)])
assert skycoord_equal(out['col0_2'], col2)
else:
assert np.all(out['col0_1'] == col1[:len(col2)])
assert np.all(out['col0_2'] == col2)
# Time class supports masking, all other mixins do not
if cls_name == 'Time':
out = table.hstack([t1, t2], join_type='outer')
assert len(out) == len(t1)
assert np.all(out['col0_1'] == col1)
assert np.all(out['col0_2'][:len(col2)] == col2)
assert np.all(out['col0_2'].mask == [False, False, True, True])
else:
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err)
def test_unique(operation_table_type):
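    # unique() keeps one row per distinct key combination; keep='first' or
    # 'last' selects which duplicate survives, and keep='none' drops all
    # rows whose key is duplicated.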
t = operation_table_type.read(
[' a b c d',
' 2 b 7.0 0',
' 1 c 3.0 5',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
], format='ascii')
tu = operation_table_type(np.sort(t[:-1]))
t_all = table.unique(t)
assert sort_eq(t_all.pformat(), tu.pformat())
t_s = t.copy()
del t_s['b', 'c', 'd']
t_all = table.unique(t_s)
assert sort_eq(t_all.pformat(), [' a ',
'---',
' 0',
' 1',
' 2'])
key1 = 'a'
t1a = table.unique(t, key1)
assert sort_eq(t1a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 7.0 0'])
t1b = table.unique(t, key1, keep='last')
assert sort_eq(t1b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 5.0 1'])
t1c = table.unique(t, key1, keep='none')
assert sort_eq(t1c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4'])
key2 = ['a', 'b']
t2a = table.unique(t, key2)
assert sort_eq(t2a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 1.0 7',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 7.0 0'])
t2b = table.unique(t, key2, keep='last')
assert sort_eq(t2b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1'])
t2c = table.unique(t, key2, keep='none')
assert sort_eq(t2c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 a 4.0 3'])
key2 = ['a', 'a']
with pytest.raises(ValueError) as exc:
t2a = table.unique(t, key2)
assert exc.value.args[0] == "duplicate key names"
with pytest.raises(ValueError) as exc:
table.unique(t, key2, keep=True)
assert exc.value.args[0] == (
"'keep' should be one of 'first', 'last', 'none'")
t1_m = operation_table_type(t1a, masked=True)
t1_m['a'].mask[1] = True
with pytest.raises(ValueError) as exc:
t1_mu = table.unique(t1_m)
assert exc.value.args[0] == (
"cannot use columns with masked values as keys; "
"remove column 'a' from keys and rerun unique()")
t1_mu = table.unique(t1_m, silent=True)
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 b 7.0 0',
' -- c 3.0 5']
with pytest.raises(ValueError) as e:
t1_mu = table.unique(t1_m, silent=True, keys='a')
t1_m = operation_table_type(t, masked=True)
t1_m['a'].mask[1] = True
t1_m['d'].mask[3] = True
# Test that multiple masked key columns get removed in the correct
# order
t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 a 4.0 --',
' 2 b 7.0 0',
' -- c 3.0 5']
def test_vstack_bytes(operation_table_type):
"""
Test for issue #5617 when vstack'ing bytes columns in Py3.
    This is really an upstream numpy issue numpy/numpy#8403.
"""
t = operation_table_type([[b'a']], names=['a'])
assert t['a'].itemsize == 1
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 1
def test_vstack_unicode():
"""
Test for problem related to issue #5617 when vstack'ing *unicode*
columns. In this case the character size gets multiplied by 4.
"""
t = table.Table([[u'a']], names=['a'])
assert t['a'].itemsize == 4 # 4-byte / char for U dtype
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 4
def test_get_out_class():
c = table.Column([1, 2])
mc = table.MaskedColumn([1, 2])
q = [1, 2] * u.m
assert _get_out_class([c, mc]) is mc.__class__
assert _get_out_class([mc, c]) is mc.__class__
assert _get_out_class([c, c]) is c.__class__
assert _get_out_class([c]) is c.__class__
with pytest.raises(ValueError):
_get_out_class([c, q])
with pytest.raises(ValueError):
_get_out_class([q, c])
def test_masking_required_exception():
"""
Test that outer join, hstack and vstack fail for a mixin column which
does not support masking.
"""
col = [1, 2, 3, 4] * u.m
t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b'])
t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c'])
with pytest.raises(NotImplementedError) as err:
table.vstack([t1, t2], join_type='outer')
assert 'vstack requires masking' in str(err)
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err)
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type='outer')
assert 'join requires masking' in str(err)
|
61d959b1041255487ea4b8d0a9577fec34ff22157b584ae3b939103b37a5bd56 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import pytest
import numpy as np
from ...tests.helper import assert_follows_unicode_guidelines, catch_warnings
from ... import table
from ... import units as u
class TestColumn():
def test_subclass(self, Column):
c = Column(name='a')
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
def test_numpy_ops(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for op, test_equal in ((operator.eq, True),
(operator.ne, False),
(operator.ge, True),
(operator.gt, False),
(operator.le, True),
(operator.lt, False)):
for eq in (op(c, arr), op(arr, c)):
assert np.all(eq) if test_equal else not np.any(eq)
assert len(eq) == 3
if Column is table.Column:
assert type(eq) == np.ndarray
else:
assert type(eq) == np.ma.core.MaskedArray
assert eq.dtype.str == '|b1'
lt = c - 1 < arr
assert np.all(lt)
def test_numpy_boolean_ufuncs(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for ufunc, test_true in ((np.isfinite, True),
(np.isinf, False),
(np.isnan, False),
(np.sign, True),
(np.signbit, False)):
result = ufunc(c)
assert len(result) == len(c)
assert np.all(result) if test_true else not np.any(result)
if Column is table.Column:
assert type(result) == np.ndarray
else:
assert type(result) == np.ma.core.MaskedArray
if ufunc is not np.sign:
assert result.dtype.str == '|b1'
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == "<{0} dtype='int64' length=3>\n1\n2\n3".format(Column.__name__)
def test_format(self, Column):
"""Show that the formatted output from str() works"""
from ... import conf
with conf.set_temp('max_lines', 8):
c1 = Column(np.arange(2000), name='a', dtype=float,
format='%6.2f')
assert str(c1).splitlines() == [' a ',
'-------',
' 0.00',
' 1.00',
' ...',
'1998.00',
'1999.00',
'Length = 2000 rows']
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name='a', dtype='i8')
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype='i4')
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
def test_array_wrap(self):
"""Test that the __array_wrap__ method converts a reduction ufunc
output that has a different shape into an ndarray view. Without this a
method call like c.mean() returns a Column array object with length=1."""
# Mean and sum for a 1-d float column
c = table.Column(name='a', data=[1., 2., 3.])
assert np.allclose(c.mean(), 2.0)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 6.)
assert isinstance(c.sum(), (np.floating, float))
# Non-reduction ufunc preserves Column class
assert isinstance(np.cos(c), table.Column)
# Sum for a 1-d int column
c = table.Column(name='a', data=[1, 2, 3])
assert np.allclose(c.sum(), 6)
assert isinstance(c.sum(), (np.integer, int))
# Sum for a 2-d int column
c = table.Column(name='a', data=[[1, 2, 3],
[4, 5, 6]])
assert c.sum() == 21
assert isinstance(c.sum(), (np.integer, int))
assert np.all(c.sum(axis=0) == [5, 7, 9])
assert c.sum(axis=0).shape == (3,)
assert isinstance(c.sum(axis=0), np.ndarray)
# Sum and mean for a 1-d masked column
c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
assert np.allclose(c.mean(), 1.5)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 3.)
assert isinstance(c.sum(), (np.floating, float))
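    def _demo_array_wrap_mechanism(self):
        # A hedged sketch (not astropy's actual implementation) of the
        # __array_wrap__ mechanism exercised above: a subclass can drop to
        # a plain ndarray/scalar when a reduction changes the shape, while
        # same-shape ufuncs keep the subclass. The helper name is made up
        # and is not collected by pytest (no 'test_' prefix).
        class Wrapped(np.ndarray):
            def __array_wrap__(self, out_arr, context=None):
                if out_arr.shape != self.shape:
                    return out_arr[()] if out_arr.ndim == 0 else out_arr.view(np.ndarray)
                return super().__array_wrap__(out_arr, context)
        w = np.arange(3.).view(Wrapped)
        assert not isinstance(w.sum(), Wrapped)  # reduction -> plain scalar
        assert isinstance(np.cos(w), Wrapped)    # same shape -> subclass kept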
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_attrs_survive_getitem_after_change(self, Column):
"""
Test for issue #3023: when calling getitem with a MaskedArray subclass
the original object attributes are not copied.
"""
c1 = Column([1, 2, 3], name='a', unit='m', format='%i',
description='aa', meta={'a': 1})
c1.name = 'b'
c1.unit = 'km'
c1.format = '%d'
c1.description = 'bb'
c1.meta = {'bbb': 2}
for item in (slice(None, None), slice(None, 1), np.array([0, 2]),
np.array([False, True, False])):
c2 = c1[item]
assert c2.name == 'b'
assert c2.unit is u.km
assert c2.format == '%d'
assert c2.description == 'bb'
assert c2.meta == {'bbb': 2}
# Make sure that calling getitem resulting in a scalar does
# not copy attributes.
val = c1[1]
for attr in ('name', 'unit', 'format', 'description', 'meta'):
assert not hasattr(val, attr)
def test_to_quantity(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
assert np.all(d.quantity == ([1, 2, 3.] * u.m))
assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value)
assert np.all(d.quantity == d.to('m'))
assert np.all(d.quantity.value == d.to('m').value)
np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
[299.792458, 149.896229, 99.93081933])
d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
with pytest.raises(u.UnitsError):
d_nounit.to(u.km)
assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
# make sure the correct copy/no copy behavior is happening
q = [1, 3, 5]*u.km
# to should always make a copy
d.to(u.km)[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
        # explicit copying of the quantity should not change the column
d.quantity.copy()[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# but quantity directly is a "view", accessing the underlying column
d.quantity[:] = q
np.testing.assert_allclose(d, [1000, 3000, 5000])
# view should also work for integers
d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
d2.quantity[:] = q
np.testing.assert_allclose(d2, [1000, 3000, 5000])
# but it should fail for strings or other non-numeric tables
d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
with pytest.raises(TypeError):
d3.quantity
def test_item_access_type(self, Column):
"""
Tests for #3095, which forces integer item access to always return a plain
ndarray or MaskedArray, even in the case of a multi-dim column.
"""
integer_types = (int, np.int_)
for int_type in integer_types:
c = Column([[1, 2], [3, 4]])
i0 = int_type(0)
i1 = int_type(1)
assert np.all(c[i0] == [1, 2])
assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
assert c[i0].shape == (2,)
c01 = c[i0:i1]
assert np.all(c01 == [[1, 2]])
assert isinstance(c01, Column)
assert c01.shape == (1, 2)
c = Column([1, 2])
assert np.all(c[i0] == 1)
assert isinstance(c[i0], np.integer)
assert c[i0].shape == ()
c01 = c[i0:i1]
assert np.all(c01 == [1])
assert isinstance(c01, Column)
assert c01.shape == (1,)
def test_insert_basic(self, Column):
c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1 == [0, 100, 1, 2])
assert c1.attrs_equal(c)
assert type(c) is type(c1)
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
c1 = c.insert(-1, 100)
assert np.all(c1 == [0, 1, 100, 2])
c1 = c.insert(3, 100)
assert np.all(c1 == [0, 1, 2, 100])
c1 = c.insert(-3, 100)
assert np.all(c1 == [100, 0, 1, 2])
c1 = c.insert(1, [100, 200, 300])
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
# Out of bounds index
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(-4, 100)
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
"""Insert with non-default axis kwarg"""
c = Column([[1, 2], [3, 4]])
c1 = c.insert(1, [5, 6], axis=None)
assert np.all(c1 == [1, 5, 6, 2, 3, 4])
c1 = c.insert(1, [5, 6], axis=1)
assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
def test_insert_multidim(self, Column):
c = Column([[1, 2],
[3, 4]], name='a', dtype=int)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])
# Broadcast
c1 = c.insert(1, 100)
assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
# Wrong shape
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
c = Column(['a', 1, None], name='a', dtype=object)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == ['a', [100, 200], 1, None])
def test_insert_masked(self):
c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,
mask=[False, True, False])
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert c1.fill_value == 9999
assert np.all(c1.data.mask == [False, False, True, False])
assert type(c) is type(c1)
for mask in (False, True):
c1 = c.insert(1, 100, mask=mask)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert np.all(c1.data.mask == [False, mask, True, False])
def test_insert_masked_multidim(self):
c = table.MaskedColumn([[1, 2],
[3, 4]], name='a', dtype=int)
c1 = c.insert(1, [100, 200], mask=True)
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
c1 = c.insert(1, [100, 200], mask=[True, False])
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
"""
        When the table is not masked, trying to set a mask on a column
        raises AttributeError.
"""
t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
with pytest.raises(AttributeError):
t['a'].mask = [True, False]
class TestAttrEqual():
"""Bunch of tests originally from ATpy that test the attrs_equal method."""
def test_5(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy')
c2 = Column(name='a', dtype=int, unit='mJy')
assert c1.attrs_equal(c2)
def test_6(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
def test_7(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_8(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_9(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_10(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_11(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='another test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_12(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'e': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_13(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 9, 'd': 12})
assert not c1.attrs_equal(c2)
def test_col_and_masked_col(self):
c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from ...utils.tests.test_metadata import MetaBaseTest
class TestMetaColumn(MetaBaseTest):
test_class = table.Column
args = ()
class TestMetaMaskedColumn(MetaBaseTest):
test_class = table.MaskedColumn
args = ()
def test_getitem_metadata_regression():
"""
Regression test for #1471: MaskedArray does not call __array_finalize__ so
the meta-data was not getting copied over. By overloading _update_from we
are able to work around this bug.
"""
# Make sure that meta-data gets propagated with __getitem__
c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
c = table.MaskedColumn(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
# As above, but with take() - check the method and the function
c = table.Column(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.Column)
c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.MaskedColumn)
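# A minimal sketch, assuming nothing about astropy's internals, of the
# __array_finalize__ propagation referred to in the docstring above: for
# ndarray subclasses it is what copies custom attributes onto views and
# slices. The helper name below is illustrative only.
def _demo_array_finalize_propagation():
    class Tagged(np.ndarray):
        def __array_finalize__(self, obj):
            # Called for views and slices; copy the custom attribute over.
            self.tag = getattr(obj, 'tag', None)
    a = np.arange(3).view(Tagged)
    a.tag = 'x'
    assert a[1:].tag == 'x'  # slice keeps the attribute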
def test_unicode_guidelines():
arr = np.array([1, 2, 3])
c = table.Column(arr, name='a')
assert_follows_unicode_guidelines(c)
def test_scalar_column():
"""
Column is not designed to hold scalars, but for numpy 1.6 this can happen:
>> type(np.std(table.Column([1, 2])))
astropy.table.column.Column
"""
c = table.Column(1.5)
assert repr(c) == '1.5'
assert str(c) == '1.5'
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm/u.s**2)
assert isinstance(qtab['f'], u.Dex)
@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
"""
Test warnings associated with in-place assignment to a string
column that results in truncation of the right hand side.
"""
t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
with catch_warnings() as w:
from inspect import currentframe, getframeinfo
t['a'][1] = 'cc'
assert len(w) == 0
t['a'][:] = 'dd'
assert len(w) == 0
with catch_warnings() as w:
frameinfo = getframeinfo(currentframe())
t['a'][0] = 'eee' # replace item with string that gets truncated
assert t['a'][0] == 'ee'
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.StringTruncateWarning
assert 'test_column' in w[0].filename
with catch_warnings() as w:
t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated
assert np.all(t['a'] == ['ff', 'gg'])
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
with catch_warnings() as w:
# Test the obscure case of assigning from an array that was originally
# wider than any of the current elements (i.e. dtype is U4 but actual
# elements are U1 at the time of assignment).
val = np.array(['ffff', 'gggg'])
val[:] = ['f', 'g']
t['a'][:] = val
assert np.all(t['a'] == ['f', 'g'])
assert len(w) == 0
def test_string_truncation_warning_masked():
"""
Test warnings associated with in-place assignment to a string
to a masked column, specifically where the right hand side
contains np.ma.masked.
"""
# Test for strings, but also cover assignment of np.ma.masked to
# int and float masked column setting. This was previously only
# covered in an unrelated io.ascii test (test_line_endings) which
# showed an unexpected difference between handling of str and numeric
# masked arrays.
for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
mc = table.MaskedColumn(values)
with catch_warnings() as w:
mc[1] = np.ma.masked
assert len(w) == 0
assert np.all(mc.mask == [False, True])
mc[:] = np.ma.masked
assert len(w) == 0
assert np.all(mc.mask == [True, True])
mc = table.MaskedColumn(['aa', 'bb'])
with catch_warnings() as w:
mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated
assert mc[1] == 'gg'
assert np.all(mc.mask == [True, False])
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
"""
Create a bytestring Column from strings (including unicode) in Py3.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = u'bä'
c = Column([uba, 'def'], dtype='S')
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
"""
Create a bytestring Column from bytes and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = u'bä'
uba8 = uba.encode('utf-8')
c = Column([uba8, b'def'])
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'S'
# Array / list comparisons
assert np.all(c == [uba, 'def'])
ok = c == [uba8, b'def']
assert type(ok) is type(c.data)
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c == np.array([uba, u'def']))
assert np.all(c == np.array([uba8, b'def']))
# Scalar compare
cmps = (uba, uba8)
for cmp in cmps:
ok = c == cmp
assert type(ok) is type(c.data)
assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
"""
Sanity check that Unicode Column behaves normally.
"""
# On Py2 the unicode must be ASCII-compatible, else the final test fails.
uba = u'bä'
uba8 = uba.encode('utf-8')
c = table.Column([uba, 'def'], dtype='U')
assert c[0] == uba
assert isinstance(c[:0], table.Column)
assert isinstance(c[0], str)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'U'
ok = c == [uba, 'def']
assert type(ok) == np.ndarray
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c != [uba8, b'def'])
def test_masked_col_unicode_sandwich():
"""
Create a bytestring MaskedColumn and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
c = table.MaskedColumn([b'abc', b'def'])
c[1] = np.ma.masked
assert isinstance(c[:0], table.MaskedColumn)
assert isinstance(c[0], str)
assert c[0] == 'abc'
assert c[1] is np.ma.masked
assert isinstance(c[:], table.MaskedColumn)
assert c[:].dtype.char == 'S'
ok = c == ['abc', 'def']
assert ok[0] == True
assert ok[1] is np.ma.masked
assert np.all(c == [b'abc', b'def'])
assert np.all(c == np.array([u'abc', u'def']))
assert np.all(c == np.array([b'abc', b'def']))
for cmp in (u'abc', b'abc'):
ok = c == cmp
assert type(ok) is np.ma.MaskedArray
assert ok[0] == True
assert ok[1] is np.ma.masked
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
"""
    Test setting bytestring column elements from both str and bytes values
"""
uba = u'bä'
c = Column([b'abc', b'def'])
c[0] = b'aa'
assert np.all(c == [u'aa', u'def'])
c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
assert np.all(c == [uba, u'def'])
assert c.pformat() == [u'None', u'----', ' ' + uba, u' def']
c[:] = b'cc'
assert np.all(c == [u'cc', u'cc'])
c[:] = uba
assert np.all(c == [uba, uba])
c[:] = ''
c[:] = [uba, b'def']
assert np.all(c == [uba, b'def'])
@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
"""Test that comparing a bytestring Column/MaskedColumn with various
str (unicode) object types gives the expected result. Tests #6838.
"""
obj1 = class1([b'a', b'c'])
if class2 is str:
obj2 = 'a'
elif class2 is list:
obj2 = ['a', 'b']
else:
obj2 = class2(['a', 'b'])
assert np.all((obj1 == obj2) == [True, False])
assert np.all((obj2 == obj1) == [True, False])
assert np.all((obj1 != obj2) == [False, True])
assert np.all((obj2 != obj1) == [False, True])
assert np.all((obj1 > obj2) == [False, True])
assert np.all((obj2 > obj1) == [False, False])
assert np.all((obj1 <= obj2) == [True, False])
assert np.all((obj2 <= obj1) == [True, True])
assert np.all((obj1 < obj2) == [False, False])
assert np.all((obj2 < obj1) == [False, True])
assert np.all((obj1 >= obj2) == [True, True])
assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
"""Test the fix for #6839 from #6899."""
c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],
mask=[True, False, True, False])
c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],
mask=[True, True, False, False])
for cmp in ((c1 == c2), (c2 == c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert cmp[3]
for cmp in ((c1 != c2), (c2 != c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert not cmp[3]
# Note: comparisons <, >, >=, <= fail to return a masked array entirely,
# see https://github.com/numpy/numpy/issues/10092.
|
af041988aa0fc1cba18c2392296755bcca71f7adf8ea7d1ae6a3d5980bed0a58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import pytest
import numpy as np
from ... import table
from ...table import Row
from ... import units as u
from .conftest import MaskedTable
def test_masked_row_with_object_col():
"""
    Numpy < 1.8 has a bug in masked array that prevents accessing a row if there is
a column with object type.
"""
t = table.Table([[1]], dtype=['O'], masked=True)
t['col0'].mask = False
assert t[0]['col0'] == 1
t['col0'].mask = True
assert t[0]['col0'] is np.ma.masked
@pytest.mark.usefixtures('table_types')
class TestRow():
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def t(self):
        # py.test runs this property once before the table_types fixture
        # has set Table and Column. In that case just return None, which
        # would cause any downstream test to fail if it happened in any
        # other context.
if self._column_type is None:
return None
if not hasattr(self, '_t'):
a = self._column_type(name='a', data=[1, 2, 3], dtype='i8')
b = self._column_type(name='b', data=[4, 5, 6], dtype='i8')
self._t = self._table_type([a, b])
return self._t
def test_subclass(self, table_types):
"""Row is subclass of ndarray and Row"""
self._setup(table_types)
c = Row(self.t, 2)
assert isinstance(c, Row)
def test_values(self, table_types):
"""Row accurately reflects table values and attributes"""
self._setup(table_types)
table = self.t
row = table[1]
assert row['a'] == 2
assert row['b'] == 5
assert row[0] == 2
assert row[1] == 5
assert row.meta is table.meta
assert row.colnames == table.colnames
assert row.columns is table.columns
with pytest.raises(IndexError):
row[2]
if sys.byteorder == 'little':
assert str(row.dtype) == "[('a', '<i8'), ('b', '<i8')]"
else:
assert str(row.dtype) == "[('a', '>i8'), ('b', '>i8')]"
def test_ref(self, table_types):
"""Row is a reference into original table data"""
self._setup(table_types)
table = self.t
row = table[1]
row['a'] = 10
if table_types.Table is not MaskedTable:
assert table['a'][1] == 10
def test_left_equal(self, table_types):
"""Compare a table row to the corresponding structured array row"""
self._setup(table_types)
np_t = self.t.as_array()
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0]
else:
for row, np_row in zip(self.t, np_t):
assert np.all(row == np_row)
def test_left_not_equal(self, table_types):
"""Compare a table row to the corresponding structured array row"""
self._setup(table_types)
np_t = self.t.as_array()
np_t['a'] = [0, 0, 0]
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0]
else:
for row, np_row in zip(self.t, np_t):
assert np.all(row != np_row)
def test_right_equal(self, table_types):
"""Test right equal"""
self._setup(table_types)
np_t = self.t.as_array()
if table_types.Table is MaskedTable:
with pytest.raises(ValueError):
self.t[0] == np_t[0]
else:
for row, np_row in zip(self.t, np_t):
assert np.all(np_row == row)
def test_convert_numpy_array(self, table_types):
self._setup(table_types)
d = self.t[1]
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_void())
assert np_data is not d.as_void()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_void())
assert np_data is not d.as_void()
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')])
def test_format_row(self, table_types):
"""Test formatting row"""
self._setup(table_types)
table = self.t
row = table[0]
assert repr(row).splitlines() == ['<{0} {1}{2}>'
.format(row.__class__.__name__,
'index=0',
' masked=True' if table.masked else ''),
' a b ',
'int64 int64',
'----- -----',
' 1 4']
assert str(row).splitlines() == [' a b ',
'--- ---',
' 1 4']
assert row._repr_html_().splitlines() == ['<i>{0} {1}{2}</i>'
.format(row.__class__.__name__,
'index=0',
' masked=True' if table.masked else ''),
'<table id="table{0}">'.format(id(table)),
'<thead><tr><th>a</th><th>b</th></tr></thead>',
'<thead><tr><th>int64</th><th>int64</th></tr></thead>',
'<tr><td>1</td><td>4</td></tr>',
'</table>']
def test_as_void(self, table_types):
"""Test the as_void() method"""
self._setup(table_types)
table = self.t
row = table[0]
        # If the table is masked, then even with no masked values the
        # numpy issue numpy/numpy#483 should come into play.
        # Make sure the as_void() code is working.
row_void = row.as_void()
if table.masked:
assert isinstance(row_void, np.ma.mvoid)
else:
assert isinstance(row_void, np.void)
assert row_void['a'] == 1
assert row_void['b'] == 4
# Confirm row is a view of table but row_void is not.
table['a'][0] = -100
assert row['a'] == -100
assert row_void['a'] == 1
# Make sure it works for a table that has masked elements
if table.masked:
table['a'].mask = True
# row_void is not a view, need to re-make
assert row_void['a'] == 1
row_void = row.as_void() # but row is a view
assert row['a'] is np.ma.masked
def test_row_and_as_void_with_objects(self, table_types):
"""Test the deprecated data property and as_void() method"""
t = table_types.Table([[{'a': 1}, {'b': 2}]], names=('a',))
assert t[0][0] == {'a': 1}
assert t[0]['a'] == {'a': 1}
assert t[0].as_void()[0] == {'a': 1}
assert t[0].as_void()['a'] == {'a': 1}
def test_bounds_checking(self, table_types):
"""Row gives index error upon creation for out-of-bounds index"""
self._setup(table_types)
for ibad in (-5, -4, 3, 4):
with pytest.raises(IndexError):
self.t[ibad]
def test_row_tuple_column_slice():
"""
Test getting and setting a row using a tuple or list of column names
"""
t = table.QTable([[1, 2, 3] * u.m,
[10., 20., 30.],
[100., 200., 300.],
['x', 'y', 'z']], names=['a', 'b', 'c', 'd'])
# Get a row for index=1
r1 = t[1]
# Column slice with tuple of col names
r1_abc = r1['a', 'b', 'c'] # Row object for these cols
r1_abc_repr = ['<Row index=1>',
' a b c ',
' m ',
'float64 float64 float64',
'------- ------- -------',
' 2.0 20.0 200.0']
assert repr(r1_abc).splitlines() == r1_abc_repr
# Column slice with list of col names
r1_abc = r1[['a', 'b', 'c']]
assert repr(r1_abc).splitlines() == r1_abc_repr
# Make sure setting on a tuple or slice updates parent table and row
r1['c'] = 1000
r1['a', 'b'] = 1000 * u.cm, 100.
assert r1['a'] == 10 * u.m
assert r1['b'] == 100
assert t['a'][1] == 10 * u.m
assert t['b'][1] == 100.
assert t['c'][1] == 1000
# Same but using a list of column names instead of tuple
r1[['a', 'b']] = 2000 * u.cm, 200.
assert r1['a'] == 20 * u.m
assert r1['b'] == 200
assert t['a'][1] == 20 * u.m
assert t['b'][1] == 200.
# Set column slice of column slice
r1_abc['a', 'c'] = -1 * u.m, -10
assert t['a'][1] == -1 * u.m
assert t['b'][1] == 200.
assert t['c'][1] == -10.
# Bad column name
with pytest.raises(KeyError) as err:
t[1]['a', 'not_there']
assert "KeyError: 'not_there'" in str(err)
# Too many values
with pytest.raises(ValueError) as err:
t[1]['a', 'b'] = 1 * u.m, 2, 3
assert 'right hand side must be a sequence' in str(err)
# Something without a length
with pytest.raises(ValueError) as err:
t[1]['a', 'b'] = 1
assert 'right hand side must be a sequence' in str(err)
def test_row_tuple_column_slice_transaction():
"""
Test that setting a row that fails part way through does not
change the table at all.
"""
t = table.QTable([[10., 20., 30.],
[1, 2, 3] * u.m], names=['a', 'b'])
tc = t.copy()
# First one succeeds but second fails.
with pytest.raises(ValueError) as err:
t[1]['a', 'b'] = (-1, -1 * u.s) # Bad unit
assert "'s' (time) and 'm' (length) are not convertible" in str(err)
assert t[1] == tc[1]
def test_uint_indexing():
"""
Test that accessing a row with an unsigned integer
works as with a signed integer. Similarly tests
that printing such a row works.
This is non-trivial: adding a signed and unsigned
integer in numpy results in a float, which is an
invalid slice index.
Regression test for gh-7464.
"""
t = table.Table([[1., 2., 3.]], names='a')
assert t['a'][1] == 2.
assert t['a'][np.int(1)] == 2.
assert t['a'][np.uint(1)] == 2.
assert t[np.uint(1)]['a'] == 2.
trepr = ['<Row index=1>',
' a ',
'float64',
'-------',
' 2.0']
assert repr(t[1]).splitlines() == trepr
assert repr(t[np.int(1)]).splitlines() == trepr
assert repr(t[np.uint(1)]).splitlines() == trepr
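# Hedged illustration of the promotion pitfall noted in test_uint_indexing:
# numpy has no integer type covering both the int64 and uint64 ranges, so
# mixing them promotes to float64, which is not a valid index. The helper
# name is made up for this sketch.
def _demo_uint_int_promotion():
    assert (np.uint64(1) + np.int64(1)).dtype == np.float64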
|
c112bdb974fe80e01294ecbc43bbb9ded642877837f281a2ab545224a4341c36 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
try:
import h5py # pylint: disable=W0611
except ImportError:
HAS_H5PY = False
else:
HAS_H5PY = True
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
import copy
import pickle
from io import StringIO
import pytest
import numpy as np
from ...coordinates import EarthLocation
from ...table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin
from ...table import serialize
from ... import time
from ... import coordinates
from ... import units as u
from ..column import BaseColumn
from .. import table_helpers
from .conftest import MIXIN_COLS
def test_attributes(mixin_cols):
"""
Required attributes for a column can be set.
"""
m = mixin_cols['m']
m.info.name = 'a'
assert m.info.name == 'a'
m.info.description = 'a'
assert m.info.description == 'a'
# Cannot set unit for these classes
if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time)):
with pytest.raises(AttributeError):
m.info.unit = u.m
else:
m.info.unit = u.m
assert m.info.unit is u.m
m.info.format = 'a'
assert m.info.format == 'a'
m.info.meta = {'a': 1}
assert m.info.meta == {'a': 1}
with pytest.raises(AttributeError):
m.info.bad_attr = 1
with pytest.raises(AttributeError):
m.info.bad_attr
def check_mixin_type(table, table_col, in_col):
# We check for QuantityInfo rather than just isinstance(col, u.Quantity)
# since we want to treat EarthLocation as a mixin, even though it is
# a Quantity subclass.
if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable)
or isinstance(in_col, Column)):
assert type(table_col) is table.ColumnClass
else:
assert type(table_col) is type(in_col)
# Make sure in_col got copied and creating table did not touch it
assert in_col.info.name is None
def test_make_table(table_types, mixin_cols):
"""
Make a table with the columns in mixin_cols, which is an ordered dict of
three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
"""
t = table_types.Table(mixin_cols)
check_mixin_type(t, t['m'], mixin_cols['m'])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))
check_mixin_type(t, t['m'], mixin_cols['m'])
t = table_types.Table(cols)
check_mixin_type(t, t['col3'], mixin_cols['m'])
def test_io_ascii_write():
"""
Test that table with mixin column can be written by io.ascii for
every pure Python writer. No validation of the output is done,
this just confirms no exceptions.
"""
from ...io.ascii.connect import _get_connectors_table
t = QTable(MIXIN_COLS)
for fmt in _get_connectors_table():
if fmt['Format'] == 'ascii.ecsv' and not HAS_YAML:
continue
if fmt['Write'] and '.fast_' not in fmt['Format']:
out = StringIO()
t.write(out, format=fmt['Format'])
def test_votable_quantity_write(tmpdir):
"""
Test that table with Quantity mixin column can be round-tripped by
io.votable. Note that FITS and HDF5 mixin support are tested (much more
thoroughly) in their respective subpackage tests
(io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
"""
t = QTable()
t['a'] = u.Quantity([1, 2, 4], unit='Angstrom')
filename = str(tmpdir.join('table-tmp'))
t.write(filename, format='votable', overwrite=True)
qt = QTable.read(filename, format='votable')
assert isinstance(qt['a'], u.Quantity)
assert qt['a'].unit == 'Angstrom'
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_standard(tmpdir, table_types):
"""
Test that table with Time mixin columns can be written by io.fits.
Validation of the output is done. Test that io.fits writes a table
containing Time mixin columns that can be partially round-tripped
(metadata scale, location).
Note that we postpone checking the "local" scale, since that cannot
be done with format 'cxcsec', as it requires an epoch.
"""
t = table_types([[1, 2], ['string', 'column']])
for scale in time.STANDARD_TIME_SCALES:
t['a'+scale] = time.Time([[1, 2], [3, 4]], format='cxcsec',
scale=scale, location=EarthLocation(
-2446354, 4237210, 4077985, unit='m'))
t['b'+scale] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale=scale)
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits', astropy_native=True)
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_local(tmpdir, table_types):
"""
Test that table with a Time mixin with scale local can also be written
by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding
``cxcsec`` format, which requires an epoch and thus cannot be used for a
local time scale.
"""
t = table_types([[1, 2], ['string', 'column']])
t['a_local'] = time.Time([[50001, 50002], [50003, 50004]],
format='mjd', scale='local',
location=EarthLocation(-2446354, 4237210, 4077985,
unit='m'))
t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale='local')
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits', astropy_native=True)
for ab in ('a', 'b'):
name = ab + '_local'
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format.
for ab in ('a', 'b'):
name = ab + '_local'
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for ab in ('a', 'b'):
name = ab + '_local'
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
def test_votable_mixin_write_fail(mixin_cols):
"""
Test that table with mixin columns (excluding Quantity) cannot be written by
io.votable.
"""
t = QTable(mixin_cols)
# Only do this test if there are unsupported column types (i.e. anything besides
# BaseColumn and Quantity class instances).
unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))
if not unsupported_cols:
pytest.skip("no unsupported column types")
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='votable')
assert 'cannot write table with mixin column(s)' in str(err.value)
def test_join(table_types):
"""
Join tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['a'] = table_types.Column(['a', 'b', 'b', 'c'])
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t2 = table_types.Table(t1)
t2['a'] = ['b', 'c', 'a', 'd']
for name, col in MIXIN_COLS.items():
t1[name].info.description = name
t2[name].info.description = name + '2'
for join_type in ('inner', 'left'):
t12 = join(t1, t2, keys='a', join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
assert t12[name1].info.description == name
assert t12[name2].info.description == name + '2'
for join_type in ('outer', 'right'):
with pytest.raises(NotImplementedError) as exc:
t12 = join(t1, t2, keys='a', join_type=join_type)
assert 'join requires masking column' in str(exc.value)
with pytest.raises(ValueError) as exc:
t12 = join(t1, t2, keys=['a', 'skycoord'])
assert 'not allowed as a key column' in str(exc.value)
# Join does work for a mixin which is a subclass of np.ndarray
t12 = join(t1, t2, keys=['quantity'])
assert np.all(t12['a_1'] == t1['a'])
def test_hstack(table_types):
"""
Hstack tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t1[name].info.description = name
t1[name].info.meta = {'a': 1}
for join_type in ('inner', 'outer'):
for chop in (True, False):
t2 = table_types.Table(t1)
if chop:
t2 = t2[:-1]
if join_type == 'outer':
with pytest.raises(NotImplementedError) as exc:
t12 = hstack([t1, t2], join_type=join_type)
assert 'hstack requires masking column' in str(exc.value)
continue
t12 = hstack([t1, t2], join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
for attr in ('description', 'meta'):
assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)
assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)
def assert_table_name_col_equal(t, name, col):
"""
Assert all(t[name] == col), with special handling for known mixin cols.
"""
if isinstance(col, coordinates.SkyCoord):
assert np.all(t[name].ra == col.ra)
assert np.all(t[name].dec == col.dec)
elif isinstance(col, u.Quantity):
if type(t) is QTable:
assert np.all(t[name] == col)
elif isinstance(col, table_helpers.ArrayWrapper):
assert np.all(t[name].data == col.data)
else:
assert np.all(t[name] == col)
def test_get_items(mixin_cols):
"""
Test that slicing / indexing table gives right values and col attrs inherit
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
t2 = t[item]
m2 = m[item]
assert_table_name_col_equal(t2, 'm', m[item])
for attr in attrs:
assert getattr(t2['m'].info, attr) == getattr(m.info, attr)
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_info_preserved_pickle_copy_init(mixin_cols):
"""
Test copy, pickle, and init from class roundtrip preserve info. This
tests not only the mixin classes but a regular column as well.
"""
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
for colname in ('i', 'm'):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_add_column(mixin_cols):
"""
Test that adding a column preserves values and attributes
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
assert m.info.name is None
    # Make sure adding a column in various ways doesn't touch the original's info
t = QTable([m], names=['a'])
assert m.info.name is None
t['new'] = m
assert m.info.name is None
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
# Add columns m2, m3, m4 by two different methods and test expected equality
t['m2'] = m
m.info.name = 'm3'
t.add_columns([m], copy=True)
m.info.name = 'm4'
t.add_columns([m], copy=False)
for name in ('m2', 'm3', 'm4'):
assert_table_name_col_equal(t, name, m)
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t[name].info, attr)
# Also check that one can set using a scalar.
s = m[0]
if type(s) is type(m):
        # We're not going to worry about testing classes for which scalars
        # are a different class than the real array (and thus lose info, etc.)
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
    # While we're at it, also check a length-1 table.
t = QTable([m[1:2]], names=['m'])
if type(s) is type(m):
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
def test_vstack():
"""
Vstack tables with mixin cols.
"""
t1 = QTable(MIXIN_COLS)
t2 = QTable(MIXIN_COLS)
with pytest.raises(NotImplementedError):
vstack([t1, t2])
def test_insert_row(mixin_cols):
"""
Test inserting a row, which only works for BaseColumn and Quantity
"""
t = QTable(mixin_cols)
t['m'].info.description = 'd'
if isinstance(t['m'], (u.Quantity, Column)):
t.insert_row(1, t[-1])
assert t[1] == t[-1]
assert t['m'].info.description == 'd'
else:
with pytest.raises(ValueError) as exc:
t.insert_row(1, t[-1])
assert "Unable to insert row" in str(exc.value)
def test_insert_row_bad_unit():
"""
Insert a row into a QTable with the wrong unit
"""
t = QTable([[1] * u.m])
with pytest.raises(ValueError) as exc:
t.insert_row(0, (2 * u.m / u.s,))
assert "'m / s' (speed) and 'm' (length) are not convertible" in str(exc.value)
def test_convert_np_array(mixin_cols):
"""
Test that converting to numpy array creates an object dtype and that
each instance in the array has the expected type.
"""
t = QTable(mixin_cols)
ta = t.as_array()
m = mixin_cols['m']
dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O'
assert ta['m'].dtype.kind == dtype_kind
def test_assignment_and_copy():
"""
Test that assignment of an int, slice, and fancy index works.
Along the way test that copying table works.
"""
for name in ('quantity', 'arraywrap'):
m = MIXIN_COLS[name]
t0 = QTable([m], names=['m'])
for i0, i1 in ((1, 2),
(slice(0, 2), slice(1, 3)),
(np.array([1, 2]), np.array([2, 3]))):
t = t0.copy()
t['m'][i0] = m[i1]
if name == 'arraywrap':
assert np.all(t['m'].data[i0] == m.data[i1])
assert np.all(t0['m'].data[i0] == m.data[i0])
assert np.all(t0['m'].data[i0] != t['m'].data[i0])
else:
assert np.all(t['m'][i0] == m[i1])
assert np.all(t0['m'][i0] == m[i0])
assert np.all(t0['m'][i0] != t['m'][i0])
def test_conversion_qtable_table():
"""
Test that a table round trips from QTable => Table => QTable
"""
qt = QTable(MIXIN_COLS)
names = qt.colnames
for name in names:
qt[name].info.description = name
t = Table(qt)
for name in names:
assert t[name].info.description == name
if name == 'quantity':
assert np.all(t['quantity'] == qt['quantity'].value)
assert np.all(t['quantity'].unit is qt['quantity'].unit)
assert isinstance(t['quantity'], t.ColumnClass)
else:
assert_table_name_col_equal(t, name, qt[name])
qt2 = QTable(qt)
for name in names:
assert qt2[name].info.description == name
assert_table_name_col_equal(qt2, name, qt[name])
def test_setitem_as_column_name():
"""
Test for mixin-related regression described in #3321.
"""
t = Table()
t['a'] = ['x', 'y']
t['b'] = 'b' # Previously was failing with KeyError
assert np.all(t['a'] == ['x', 'y'])
assert np.all(t['b'] == ['b', 'b'])
def test_quantity_representation():
"""
Test that table representation of quantities does not have unit
"""
t = QTable([[1, 2] * u.m])
assert t.pformat() == ['col0',
' m ',
'----',
' 1.0',
' 2.0']
def test_skycoord_representation():
"""
Test that skycoord representation works, both in the way that the
values are output and in changing the frame representation.
"""
# With no unit we get "None" in the unit row
c = coordinates.SkyCoord([0], [1], [0], representation='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
'None,None,None',
'--------------',
' 0.0,1.0,0.0']
# Test that info works with a dynamically changed representation
c = coordinates.SkyCoord([0], [1], [0], unit='m', representation='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
' m,m,m ',
'-----------',
'0.0,1.0,0.0']
t['col0'].representation = 'unitspherical'
assert t.pformat() == [' col0 ',
'deg,deg ',
'--------',
'90.0,0.0']
t['col0'].representation = 'cylindrical'
assert t.pformat() == [' col0 ',
' m,deg,m ',
'------------',
'1.0,90.0,0.0']
def test_ndarray_mixin():
"""
Test directly adding a plain structured array into a table instead of the
view as an NdarrayMixin. Once added as an NdarrayMixin then all the previous
tests apply.
"""
a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')],
dtype='<i4,' + ('|U1'))
b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')],
dtype=[('x', 'i4'), ('y', ('U2'))])
c = np.rec.fromrecords([(100, 'raa'), (200, 'rbb'), (300, 'rcc'), (400, 'rdd')],
names=['rx', 'ry'])
d = np.arange(8).reshape(4, 2).view(NdarrayMixin)
# Add one during initialization and the next as a new column.
t = Table([a], names=['a'])
t['b'] = b
t['c'] = c
t['d'] = d
assert isinstance(t['a'], NdarrayMixin)
assert t['a'][1][1] == a[1][1]
assert t['a'][2][0] == a[2][0]
assert t[1]['a'][1] == a[1][1]
assert t[2]['a'][0] == a[2][0]
assert isinstance(t['b'], NdarrayMixin)
assert t['b'][1]['x'] == b[1]['x']
assert t['b'][1]['y'] == b[1]['y']
assert t[1]['b']['x'] == b[1]['x']
assert t[1]['b']['y'] == b[1]['y']
assert isinstance(t['c'], NdarrayMixin)
assert t['c'][1]['rx'] == c[1]['rx']
assert t['c'][1]['ry'] == c[1]['ry']
assert t[1]['c']['rx'] == c[1]['rx']
assert t[1]['c']['ry'] == c[1]['ry']
assert isinstance(t['d'], NdarrayMixin)
assert t['d'][1][0] == d[1][0]
assert t['d'][1][1] == d[1][1]
assert t[1]['d'][0] == d[1][0]
assert t[1]['d'][1] == d[1][1]
assert t.pformat() == [' a b c d [2] ',
'-------- ---------- ------------ ------',
"(1, 'a') (10, 'aa') (100, 'raa') 0 .. 1",
"(2, 'b') (20, 'bb') (200, 'rbb') 2 .. 3",
"(3, 'c') (30, 'cc') (300, 'rcc') 4 .. 5",
"(4, 'd') (40, 'dd') (400, 'rdd') 6 .. 7"]
def test_possible_string_format_functions():
"""
The QuantityInfo info class for Quantity implements a
possible_string_format_functions() method that overrides the
standard pprint._possible_string_format_functions() function.
Test this.
"""
t = QTable([[1, 2] * u.m])
t['col0'].info.format = '%.3f'
assert t.pformat() == [' col0',
' m ',
'-----',
'1.000',
'2.000']
t['col0'].info.format = 'hi {:.3f}'
assert t.pformat() == [' col0 ',
' m ',
'--------',
'hi 1.000',
'hi 2.000']
t['col0'].info.format = '.4f'
assert t.pformat() == [' col0 ',
' m ',
'------',
'1.0000',
'2.0000']
def test_rename_mixin_columns(mixin_cols):
"""
Rename a mixin column.
"""
t = QTable(mixin_cols)
tc = t.copy()
t.rename_column('m', 'mm')
assert t.colnames == ['i', 'a', 'b', 'mm']
if isinstance(t['mm'], table_helpers.ArrayWrapper):
assert np.all(t['mm'].data == tc['m'].data)
elif isinstance(t['mm'], coordinates.SkyCoord):
assert np.all(t['mm'].ra == tc['m'].ra)
assert np.all(t['mm'].dec == tc['m'].dec)
else:
assert np.all(t['mm'] == tc['m'])
def test_represent_mixins_as_columns_unit_fix():
"""
If the unit is invalid for a column that gets serialized this would
cause an exception. Fixed in #7481.
"""
t = Table({'a': [1, 2]}, masked=True)
t['a'].unit = 'not a valid unit'
t['a'].mask[1] = True
serialize._represent_mixins_as_columns(t)
|
80f156d9d51feee50afc2618ec90feaa728ec2ce57c15058339604de1cc01671 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal_nulp
from ..convolve import convolve_fft
VALID_DTYPES = []
for dtype_array in ['>f4', '<f4', '>f8', '<f8']:
for dtype_kernel in ['>f4', '<f4', '>f8', '<f8']:
VALID_DTYPES.append((dtype_array, dtype_kernel))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap']
NANTREATMENT_OPTIONS = ('interpolate', 'fill')
"""
What does convolution mean? We use the 'same size' assumption here (i.e.,
you expect an array of the exact same size as the one you put in)
Convolving any array with a kernel that is [1] should result in the same array returned
Working example array: [1, 2, 3, 4, 5]
Convolved with [1] = [1, 2, 3, 4, 5]
Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT!
Convolved with [1, 0] = [1, 2, 3, 4, 5]
Convolved with [0, 1] = [0, 1, 2, 3, 4]
"""
# NOTE: use_numpy_fft is redundant if you don't have FFTW installed
option_names = ('boundary', 'nan_treatment', 'normalize_kernel')
options = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
))
option_names_preserve_nan = ('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan')
options_preserve_nan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False)))
def assert_floatclose(x, y):
"""Assert arrays are close to within expected floating point rounding.
Check that the result is correct at the precision expected for 64 bit
numbers, taking account that the tolerance has to reflect that all powers
in the FFTs enter our values.
"""
# The number used is set by the fact that the Windows FFT sometimes
# returns an answer that is EXACTLY 10*np.spacing.
assert_allclose(x, y, atol=10*np.spacing(x.max()), rtol=0.)
class TestConvolve1D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_key = (boundary, nan_treatment, normalize_kernel)
answer_dict = {
'sum_fill_zeros': np.array([1., 4., 3.], dtype='float64'),
'average_fill_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
}
result_dict = {
# boundary, nan_treatment, normalize_kernel
('fill', 'interpolate', True): answer_dict['average_fill_zeros'],
('wrap', 'interpolate', True): answer_dict['average_wrap'],
('fill', 'interpolate', False): answer_dict['sum_fill_zeros'],
('wrap', 'interpolate', False): answer_dict['sum_wrap'],
}
for k in list(result_dict.keys()):
result_dict[(k[0], 'fill', k[2])] = result_dict[k]
for k in list(result_dict.keys()):
if k[0] == 'fill':
result_dict[(None, k[1], k[2])] = result_dict[k]
assert_floatclose(z, result_dict[answer_key])
@pytest.mark.parametrize(option_names, options)
def test_halfity_3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a uniform, non-unity kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='float64')
y = np.array([0.5, 0.5, 0.5], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
answer_dict = {
'sum': np.array([0.5, 2.0, 1.5], dtype='float64'),
'sum_zeros': np.array([0.5, 2., 1.5], dtype='float64'),
'sum_nozeros': np.array([0.5, 2., 1.5], dtype='float64'),
'average': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'sum_wrap': np.array([2., 2., 2.], dtype='float64'),
'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
'average_nozeros': np.array([0.5, 4 / 3., 1.5], dtype='float64'),
}
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
assert_floatclose(z, answer_dict[answer_key])
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([0., 1., 0.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, [1., 0., 3.])
inputs = (np.array([1., np.nan, 3.], dtype='float64'),
np.array([1., np.inf, 3.], dtype='float64'))
outputs = (np.array([1., 0., 3.], dtype='float64'),
np.array([1., 0., 3.], dtype='float64'))
options_unity1withnan = list(itertools.product(BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False),
inputs, outputs))
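    # Note: the product above pairs every input with every output; this is
    # harmless only because both expected outputs are identical ([1., 0., 3.]).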
@pytest.mark.parametrize(option_names_preserve_nan + ('inval', 'outval'),
options_unity1withnan)
def test_unity_1_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan, inval, outval):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = inval
y = np.array([1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, outval)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='float64')
y = np.array([1., 1., 1.], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
answer_dict = {
'sum': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros': np.array([1., 4., 3.], dtype='float64'),
'sum_zeros': np.array([1., 4., 3.], dtype='float64'),
'sum_nozeros_interpnan': np.array([1., 4., 3.], dtype='float64'),
'average': np.array([1., 2., 3.], dtype='float64'),
'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
'average_wrap': np.array([4/3., 4/3., 4/3.], dtype='float64'),
'average_wrap_interpnan': np.array([2, 2, 2], dtype='float64'),
'average_nozeros': np.array([1/2., 4/3., 3/2.], dtype='float64'),
'average_nozeros_interpnan': np.array([1., 2., 3.], dtype='float64'),
'average_zeros': np.array([1 / 3., 4 / 3., 3 / 3.], dtype='float64'),
'average_zeros_interpnan': np.array([1 / 2., 4 / 2., 3 / 2.], dtype='float64'),
}
for key in list(answer_dict.keys()):
if 'sum' in key:
answer_dict[key+"_interpnan"] = answer_dict[key] * 3./2.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
else:
# average = average_zeros; sum = sum_zeros
answer_key += '_zeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
posns = np.where(np.isfinite(z))
assert_floatclose(z[posns], answer_dict[answer_key][posns])
    def test_nan_fill(self):
        # Test NaN fill_value: the NaN padding at the edges is interpolated over
        array = np.array([1., np.nan, 3.], dtype='float64')
        kernel = np.array([1, 1, 1])
        result = convolve_fft(array, kernel, boundary='fill', fill_value=np.nan)
        assert_floatclose(result, [1, 2, 3])
def test_masked_array(self):
"""
Check whether convolve_fft works with masked arrays.
"""
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
        # Test masked kernel -- note: the mask below is applied to the array,
        # not the kernel, so this currently repeats the masked-array case above
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill', fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
def test_normalize_function(self):
"""
Check if convolve_fft works when passing a normalize function.
"""
array = [1, 2, 3]
kernel = [3, 3, 3]
result = convolve_fft(array, kernel, normalize_kernel=np.max)
assert_floatclose(result, [3, 6, 5])
@pytest.mark.parametrize(option_names, options)
def test_normalization_is_respected(self, boundary,
nan_treatment,
normalize_kernel):
"""
Check that if normalize_kernel is False then the normalization
tolerance is respected.
"""
array = np.array([1, 2, 3])
# A simple identity kernel to which a non-zero normalization is added.
base_kernel = np.array([1.0])
# Use the same normalization error tolerance in all cases.
normalization_rtol = 1e-4
# Add the error below to the kernel.
norm_error = [normalization_rtol / 10, normalization_rtol * 10]
for err in norm_error:
kernel = base_kernel + err
result = convolve_fft(array, kernel,
normalize_kernel=normalize_kernel,
nan_treatment=nan_treatment,
normalization_zero_tol=normalization_rtol)
if normalize_kernel:
# Kernel has been normalized to 1.
assert_floatclose(result, array)
else:
# Kernel should not have been normalized...
assert_floatclose(result, array * kernel)
class TestConvolve2D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[1.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel)
w = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['sum_withzeros'] = answer_dict['sum']
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
a = answer_dict[answer_key]
assert_floatclose(z, a)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='float64')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='float64')
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
z = np.nan_to_num(z)
x = np.nan_to_num(x)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3x3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 3.],
[1., np.nan, 0.],
[0., 2., 0.]], dtype='float64')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='float64')
# commented out: allow unnormalized nan-ignoring convolution
# # kernel is not normalized, so this situation -> exception
# if nan_treatment and not normalize_kernel:
# with pytest.raises(ValueError):
# z = convolve_fft(x, y, boundary=boundary,
# nan_treatment=nan_treatment,
# normalize_kernel=normalize_kernel,
# ignore_edge_zeros=ignore_edge_zeros,
# )
# return
z = convolve_fft(x, y, boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1, 1])
# weights
w_n = np.array([[3., 5., 3.],
[5., 8., 5.],
[3., 5., 3.]], dtype='float64')
w_z = np.array([[4., 6., 4.],
[6., 9., 6.],
[4., 6., 4.]], dtype='float64')
answer_dict = {
'sum': np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='float64'),
'sum_wrap': np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='float64'),
}
answer_dict['average'] = answer_dict['sum'] / w_z
answer_dict['average_interpnan'] = answer_dict['sum'] / w_n
answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8.
answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9.
answer_dict['average_withzeros'] = answer_dict['sum'] / 9.
answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8.
answer_dict['sum_withzeros'] = answer_dict['sum']
answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8.
answer_dict['sum_withzeros_interpnan'] = answer_dict['sum']
answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8.
if normalize_kernel:
answer_key = 'average'
else:
answer_key = 'sum'
if boundary == 'wrap':
answer_key += '_wrap'
elif nan_treatment == 'fill':
answer_key += '_withzeros'
if nan_treatment == 'interpolate':
answer_key += '_interpnan'
a = answer_dict[answer_key]
# Skip the NaN at [1, 1] when preserve_nan=True
posns = np.where(np.isfinite(z))
# for reasons unknown, the Windows FFT returns an answer for the [0, 0]
# component that is EXACTLY 10*np.spacing
        assert_floatclose(z[posns], a[posns])
def test_big_fail(self):
""" Test that convolve_fft raises an exception if a too-large array is passed in """
with pytest.raises((ValueError, MemoryError)):
# while a good idea, this approach did not work; it actually writes to disk
# arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex)
# this just allocates the memory but never touches it; it's better:
arr = np.empty([512, 512, 512], dtype=complex)
# note 512**3 * 16 bytes = 2.0 GB
convolve_fft(arr, arr)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary in (None, 'fill'):
assert_floatclose(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'))
elif boundary == 'wrap':
assert_floatclose(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'))
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False)
if boundary in (None, 'fill'):
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import numpy.ma as ma
from ..convolve import convolve, convolve_fft
from numpy.testing import assert_array_almost_equal_nulp, assert_array_almost_equal
import itertools
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
NANHANDLING_OPTIONS = ['interpolate', 'fill']
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
BOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),
BOUNDARY_OPTIONS)) + [(convolve_fft,
'wrap'),
(convolve_fft,
'fill')])
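# The list above pairs `convolve` with every boundary option and additionally
# exercises `convolve_fft` for the 'wrap' and 'fill' boundaries.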
HAS_SCIPY = True
try:
import scipy
except ImportError:
HAS_SCIPY = False
HAS_PANDAS = True
try:
import pandas
except ImportError:
HAS_PANDAS = False
class TestConvolve1D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [1, 4, 5, 6, 5, 7, 8]
y = [0.2, 0.6, 0.2]
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
def test_tuple(self):
"""
Test that convolve works correctly when inputs are tuples
"""
x = (1, 4, 5, 6, 5, 7, 8)
y = (0.2, 0.6, 0.2)
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve works correctly when inputs are lists
"""
array = [1., 4., 5., 6., 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified_with_nan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# ( NaN == NaN ) = False
        # Only compare non-NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([1., 2., 3.], dtype=dtype_array)
y = np.array([0., 1., 0.], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('convfunc', 'boundary',), BOUNDARIES_AND_CONVOLUTIONS)
def test_unity_1_none(self, boundary, convfunc):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([1.], dtype='>f8')
z = convfunc(x, y, boundary=boundary)
np.testing.assert_allclose(z, x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3(self, boundary):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3(self, boundary):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))
elif boundary == 'fill':
assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))
elif boundary == 'wrap':
assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))
else:
assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_unity_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
        x = np.nan_to_num(x)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
# boundary, nan_treatment, normalize_kernel
rslt = {
(None, 'interpolate', True): [0, 2, 0],
(None, 'interpolate', False): [0, 6, 0],
(None, 'fill', True): [0, 4/3., 0],
(None, 'fill', False): [0, 4, 0],
('fill', 'interpolate', True): [1/2., 2, 3/2.],
('fill', 'interpolate', False): [3/2., 6, 9/2.],
('fill', 'fill', True): [1/3., 4/3., 3/3.],
('fill', 'fill', False): [1, 4, 3],
('wrap', 'interpolate', True): [2, 2, 2],
('wrap', 'interpolate', False): [6, 6, 6],
('wrap', 'fill', True): [4/3., 4/3., 4/3.],
('wrap', 'fill', False): [4, 4, 4],
('extend', 'interpolate', True): [1, 2, 3],
('extend', 'interpolate', False): [3, 6, 9],
('extend', 'fill', True): [2/3., 4/3., 6/3.],
('extend', 'fill', False): [2, 4, 6],
}[boundary, nan_treatment, normalize_kernel]
if preserve_nan:
rslt[1] = 0
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_zero_sum_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with zero sum kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = [-1, -1, -1, -1, 8, -1, -1, -1, -1]
        assert np.isclose(sum(y), 0, atol=1e-8)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 0., 0., 0., 0., 0.],
('fill'): [-6., -3., -1., 0., 0., 10., 21., 33., 46.],
('wrap'): [-36., -27., -18., -9., 0., 9., 18., 27., 36.],
('extend'): [-10., -6., -3., -1., 0., 1., 3., 6., 10.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_int_masked_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with integer masked kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = ma.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 9., 0., 0., 0., 0.],
('fill'): [-1., 3., 6., 8., 9., 10., 21., 33., 46.],
('wrap'): [-31., -21., -11., -1., 9., 10., 20., 30., 40.],
('extend'): [-5., 0., 4., 7., 9., 10., 12., 15., 19.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize('preserve_nan', PRESERVE_NAN_OPTIONS)
def test_int_masked_array(self, preserve_nan):
"""
Test that convolve works correctly with integer masked arrays.
"""
x = ma.array([3, 5, 7, 11, 13], mask=[0, 0, 1, 0, 0], fill_value=0.)
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[2])
z[2] = 8
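        # Derivation of the expected values (assuming the documented defaults
        # boundary='fill', fill_value=0, nan_treatment='interpolate',
        # normalize_kernel=True): the masked 7 is excluded and each position
        # renormalizes over the remaining finite samples, e.g.
        # z[0] = (0 + 3 + 5) / 3 = 8/3 and z[1] = (3 + 5) / 2 = 4.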
assert_array_almost_equal_nulp(z, (8/3., 4, 8, 12, 8), 10)
class TestConvolve2D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)
assert_array_almost_equal_nulp(z, x, 10)
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1_none(self, boundary):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 5., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 6., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],
[4., 6., 8.],
[6., 5., 4.]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3_withnan(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
preserve_nan=True)
assert np.isnan(z[1, 1])
        x = np.nan_to_num(x)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnanfilled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 8., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],
[4., 8., 7.],
[4., 4., 3.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],
[8., 8., 8.],
[8., 8., 8.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],
[5., 8., 11.],
[8., 7., 6.]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnaninterped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],
[4./8, 8./8, 7./8],
[4./8, 4./8, 3./8]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],
[5./8, 8./8, 11./8],
[8./8, 7./8, 6./8]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel_2D(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='float'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],
[0., 0., 1.],
[2., -4., 2.]], dtype='float'), 10)
else:
raise ValueError("Invalid boundary specification")
class TestConvolve3D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
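        # A 3x3x3 ones kernel sums to 27, and fill_value=1 makes the padding
        # indistinguishable from the interior, so every output element is 27.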
assert_array_almost_equal_nulp(z / 27, x, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1x1_none(self, boundary):
'''
Test that a 1x1x1 unit kernel returns the same array
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.array([[[1.]]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3x3(self, boundary):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],
[[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],
[[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],
[[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],
[[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS))
def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
preserve_nan=True)
assert np.isnan(z[1, 1, 1])
        x = np.nan_to_num(x)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_filled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8'), 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_interped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
kernsum = y.sum() - 1 # one nan is missing
mid = x[np.isfinite(x)].sum() / kernsum
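        # With 'wrap' the array is treated as periodic, so every output pixel
        # sees all 26 finite values once; the interpolated result is therefore
        # flat and equal to `mid` everywhere.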
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8')/kernsum, 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Regression test for #6264: make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary is None:
assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
@pytest.mark.parametrize('ndims', (1, 2, 3))
def test_convolution_consistency(ndims):
np.random.seed(0)
array = np.random.randn(*([3]*ndims))
np.random.seed(0)
kernel = np.random.rand(*([3]*ndims))
conv_f = convolve_fft(array, kernel, boundary='fill')
conv_d = convolve(array, kernel, boundary='fill')
assert_array_almost_equal_nulp(conv_f, conv_d, 30)
def test_astropy_convolution_against_numpy():
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_SCIPY')
def test_astropy_convolution_against_scipy():
from scipy.signal import fftconvolve
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_PANDAS')
def test_regression_6099():
wave = np.array((np.linspace(5000, 5100, 10)))
boxcar = 3
nonseries_result = convolve(wave, np.ones((boxcar,))/boxcar)
wave_series = pandas.Series(wave)
series_result = convolve(wave_series, np.ones((boxcar,))/boxcar)
assert_array_almost_equal(nonseries_result, series_result)
def test_invalid_array_convolve():
kernel = np.ones(3)/3.
with pytest.raises(TypeError):
convolve('glork', kernel)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
import os.path
import pytest
import numpy as np
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal
from unittest import mock
from . import irafutil
from .. import models
from ..core import Fittable2DModel, Parameter
from ..fitting import *
from ...utils import NumpyRNGContext
from ...utils.data import get_pkg_data_filename
from .utils import ignore_non_integer_warning
from ...stats import sigma_clip
from ...utils.exceptions import AstropyUserWarning
from ..fitting import populate_entry_points
import warnings
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
from pkg_resources import EntryPoint
HAS_PKG = True
except ImportError:
HAS_PKG = False
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomail fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
self.fitter = LinearLSQFitter()
def test_poly2D_fitting(self):
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten())[0]
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
new_model = self.fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_polynomial2D_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
nlfitter = LevMarLSQFitter()
new_model = nlfitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2DModel and default coefficients
Fit z using a ICheb2D model
Evaluate the ICheb2D polynomial and compare with the initial z
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
model = nlfitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting_with_weights(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
weights = np.ones_like(self.y)
model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
    Tests the joint fitting routine using two Gaussian models
"""
def setup_class(self):
"""
        Create two Gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
        Tests the fitting routine against an equivalent hand-rolled
        `scipy.optimize.leastsq` procedure and compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
def test_estimated_vs_analytic_deriv(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_estimated_vs_analytic_deriv_with_weights(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.)
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_with_optimize(self):
"""
Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
def test_with_weights(self):
"""
Tests results from `LevMarLSQFitter` with weights.
"""
# part 1: weights are equal to 1
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.parametrize('fitter_class', fitters)
def test_fitter_against_LevMar(self, fitter_class):
"""Tests results from non-linear fitters against `LevMarLSQFitter`."""
levmar = LevMarLSQFitter()
fitter = fitter_class()
with ignore_non_integer_warning():
new_model = fitter(self.gauss, self.xdata, self.ydata)
model = levmar(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
def test_LSQ_SLSQP_with_constraints(self):
"""
Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
constraints.
"""
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fitter = LevMarLSQFitter()
fslsqp = SLSQPLSQFitter()
with ignore_non_integer_warning():
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
def test_param_cov(self):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
            # non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
X = np.matrix(np.vstack([x, np.ones(len(x))]).T)
beta = np.linalg.inv(X.T * X) * X.T * np.matrix(y).T
s2 = np.sum((y - (X * beta).A.ravel())**2) / (len(y) - len(beta))
olscov = np.linalg.inv(X.T * X) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
fitter = LevMarLSQFitter()
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.A.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
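        # Sketch (an addition, not part of the original test): the same OLS
        # covariance without the legacy np.matrix API, using plain ndarrays:
        #   X = np.vstack([x, np.ones(len(x))]).T
        #   beta = np.linalg.inv(X.T @ X) @ X.T @ y
        #   s2 = np.sum((y - X @ beta) ** 2) / (len(y) - len(beta))
        #   olscov = np.linalg.inv(X.T @ X) * s2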
@pytest.mark.skipif('not HAS_PKG')
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def setup_class(self):
self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
        # This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
            # This should import, but it should fail the type check
pass
return badfunc
def returnbadclass(self):
        # This should import, but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
populate_entry_points([mock_entry_importerror])
except AstropyUserWarning as w:
if "ImportError" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_func(self):
"""This returns a function which fails the type check"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
populate_entry_points([mock_entry_badfunc])
except AstropyUserWarning as w:
if "Class" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
populate_entry_points([mock_entry_badclass])
except AstropyUserWarning as w:
if 'modeling.Fitter' in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
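        # the Bernoulli draw flags ~25% of the samples as outliers; those get
        # an extra broad normal perturbation in the line below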
self.y += (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Compute the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
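        # convert the flux-weighted centroid from world coordinates to array
        # indices so the peak amplitude can be read off the data grid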
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
self.z += (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
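    # data for two models fitted simultaneously: a straight line and a
    # quadratic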
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1,5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
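    # rollaxis moves the model-set dimension to the end, matching
    # model_set_axis=2 above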
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3,3:5,0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
        # the values of x, y are unimportant, since we fit the constant
        # model z(x, y) = p0 here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0,0] = 1000.0 # outlier
self.z[0,1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
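        # the weights are applied to the residuals (i.e. like 1/sigma), so
        # they enter the normal equations squared; the weighted mean of the
        # -1/+1 chessboard is then (3**2 * 1 + 1**2 * (-1)) / (3**2 + 1**2) = 0.8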
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0,0] and mask[0,1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
def test_2d_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LevMarLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
"""Issue #5737 """
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LevMarLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_interface():
"""
Test that **kwargs work with all optimizers.
This is a basic smoke test.
"""
levmar = LevMarLSQFitter()
slsqp = SLSQPLSQFitter()
simplex = SimplexLSQFitter()
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
slsqp_model = slsqp(model, x, y, **kwargs)
simplex_model = simplex(model, x, y, **simplex_kwargs)
kwargs.pop('verblevel')
lm_model = levmar(model, x, y, **kwargs)
|
91a951acc9b5b5a1ce356fa5ffc43826a1ee6dc3c45d1f067cdcdbe5d5b11207 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from .. import models, InputParameterError
from ...coordinates import Angle
from .. import fitting
from ...tests.helper import catch_warnings
from ...utils.exceptions import AstropyDeprecationWarning
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,
theta=np.pi/6.)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose([model.x_fwhm, model.y_fwhm],
[12.009582229657841, 7.7709061486021325])
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49., -16.], [-16., 9.]]
model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, 'deg')
pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
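    # a Gaussian rotated by theta, evaluated at the correspondingly rotated
    # point, must equal the unrotated Gaussian at the original point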
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49., -16.], [-16., 9.]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
def test_moffat_fwhm():
ans = 34.641016151377542
kwargs = {'gamma': 10, 'alpha': 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False),
[[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),
[[1, 2], [1, 2], [1, 2]])
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, 'deg')
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,
theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
@pytest.mark.skipif('not HAS_SCIPY')
def test_Shift_model_levmar_fit():
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
init_model = models.Shift()
x = np.arange(10)
y = x+0.1
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x+0.1, x-0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15*x, 0.96*x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.width.value == 3
@pytest.mark.skipif("not HAS_SCIPY")
def test_Voigt1D():
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
fitter = fitting.LevMarLSQFitter()
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
@pytest.mark.skipif("not HAS_SCIPY")
def test_compound_models_with_class_variables():
models_2d = [models.AiryDisk2D, models.Sersic2D]
models_1d = [models.Sersic1D]
for model_2d in models_2d:
class CompoundModel2D(models.Const2D + model_2d):
pass
x, y = np.mgrid[:10, :10]
f = CompoundModel2D()(x, y)
assert f.shape == (10, 10)
for model_1d in models_1d:
class CompoundModel1D(models.Const1D + model_1d):
pass
x = np.arange(10)
f = CompoundModel1D()(x)
assert f.shape == (10,)
|
9c8d6ebdba51eabdb481fb01c6e1e66c5ecf5970554fb67ca7cd57f29aaf8767 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..core import Model
from ..models import Gaussian1D, Shift, Scale, Pix2Sky_TAN
from ... import units as u
from ...units import UnitsError
from ...tests.helper import assert_quantity_allclose
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
def test_evaluate_with_quantities():
"""
Test evaluation of a single model with Quantity parameters that do
not explicitly require units.
"""
# We create two models here - one with quantities, and one without. The one
# without is used to create the reference values for comparison.
g = Gaussian1D(1, 1, 0.1)
gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# We first check that calling the Gaussian with quantities returns the
# expected result
assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)
# Units have to be specified for the Gaussian with quantities - if not, an
# error is raised
with pytest.raises(UnitsError) as exc:
gq(1)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', (dimensionless), could not be "
"converted to required input units of m (length)")
# However, zero is a special case
assert_quantity_allclose(gq(0), g(0) * u.J)
# We can also evaluate models with equivalent units
assert_allclose(gq(0.0005 * u.km).value, g(0.5))
# But not with incompatible units
with pytest.raises(UnitsError) as exc:
gq(3 * u.s)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', s (time), could not be "
"converted to required input units of m (length)")
# We also can't evaluate the model without quantities with a quantity
with pytest.raises(UnitsError) as exc:
g(3 * u.m)
# TODO: determine what error message should be here
# assert exc.value.args[0] == ("Units of input 'x', m (length), could not be "
# "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
"""
We now make sure that equivalencies are correctly taken into account
"""
g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)
# We aren't setting the equivalencies, so this won't work
with pytest.raises(UnitsError) as exc:
g(30 * u.PHz)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"nm (length)")
# But it should now work if we pass equivalencies when evaluating
assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}),
g(9.993081933333332 * u.nm))
class MyTestModel(Model):
inputs = ('a', 'b')
outputs = ('f',)
def evaluate(self, a, b):
print('a', a)
print('b', b)
return a * b
class TestInputUnits():
def setup_method(self, method):
self.model = MyTestModel()
def test_evaluate(self):
# We should be able to evaluate with anything
assert_quantity_allclose(self.model(3, 5), 15)
assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)
def test_input_units(self):
self.model._input_units = {'a': u.deg}
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
"converted to required input units of deg (angle)")
with pytest.raises(UnitsError) as exc:
self.model(3, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', (dimensionless), could "
"not be converted to required input units of deg (angle)")
def test_input_units_allow_dimensionless(self):
self.model._input_units = {'a': u.deg}
self.model._input_units_allow_dimensionless = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', s (time), could not be "
"converted to required input units of deg (angle)")
assert_quantity_allclose(self.model(3, 3), 9)
def test_input_units_strict(self):
self.model._input_units = {'a': u.deg}
self.model._input_units_strict = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
result = self.model(np.pi * u.rad, 2)
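        # with strict input units, pi rad is converted to 180 deg before
        # evaluate, so the product is 180 * 2 = 360, reported in deg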
assert_quantity_allclose(result, 360 * u.deg)
assert result.unit is u.deg
def test_input_units_equivalencies(self):
self.model._input_units = {'a': u.micron}
with pytest.raises(UnitsError) as exc:
self.model(3 * u.PHz, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'a', PHz (frequency), could "
"not be converted to required input units of "
"micron (length)")
self.model.input_units_equivalencies = {'a': u.spectral()}
assert_quantity_allclose(self.model(3 * u.PHz, 3),
3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))
def test_return_units(self):
self.model._input_units = {'a': u.deg}
self.model._return_units = {'f': u.rad}
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_return_units_scalar(self):
        # Check that return_units also works when giving a single unit since
        # there is only one output, so it is unambiguous.
self.model._input_units = {'a': u.deg}
self.model._return_units = u.rad
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_and_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.arcsecond, 20 * u.arcsecond)
assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec)
assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec)
def test_plus_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 + s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec)
def test_compound_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec)
def test_compound_input_units_fail():
"""
Test incompatible units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_incompatible_units_fail():
"""
Test incompatible model units in chain.
"""
s1 = Shift(10 * u.pix)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_pipe_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a chained model
(which has one input).
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
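    # 10 pix -> 5 deg via the 0.5 deg/pix equivalency, then two shifts of
    # 10 deg each give 25 deg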
out = cs(10 * u.pix, equivalencies={'x': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out, 25 * u.deg)
def test_compound_and_equiv_call():
"""
    Check that equivalencies work when passed to evaluate, for a composite model
with two inputs.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.pix, 10 * u.pix, equivalencies={'x0': u.pixel_scale(0.5 * u.deg / u.pix),
'x1': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out[0], 15 * u.deg)
assert_quantity_allclose(out[1], 15 * u.deg)
def test_compound_input_units_equivalencies():
"""
Test setting input_units_equivalencies on one of the models.
"""
s1 = Shift(10 * u.deg)
s1.input_units_equivalencies = {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
s2 = Shift(10 * u.deg)
sp = Shift(10 * u.pix)
cs = s1 | s2
out = cs(10 * u.pix)
assert_quantity_allclose(out, 25 * u.deg)
cs = sp | s1
out = cs(10 * u.pix)
assert_quantity_allclose(out, 20 * u.deg)
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(20 * u.pix, 10 * u.deg)
assert_quantity_allclose(out, 20 * u.deg)
with pytest.raises(UnitsError) as exc:
out = cs(20 * u.pix, 10 * u.pix)
assert exc.value.args[0] == "TestModel: Units of input 'x1', pix (unknown), could not be converted to required input units of deg (angle)"
def test_compound_input_units_strict():
"""
Test setting input_units_strict on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s2 = Scale(2)
cs = s1 | s2
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s2 | s1
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s1 & s2
out = cs(10 * u.arcsec, 10 * u.arcsec)
assert_quantity_allclose(out, 20 * u.arcsec)
assert out[0].unit is u.deg
assert out[1].unit is u.arcsec
def test_compound_input_units_allow_dimensionless():
"""
Test setting input_units_allow_dimensionless on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = Scale(2)
cs = s1 | s2
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == "TestModel: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = False
cs = s1 | s2
cs = cs.rename('TestModel')
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == "TestModel: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = True
cs = s2 | s1
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', m (length), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = False
cs = s2 | s1
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == "ScaleDegrees: Units of input 'x', (dimensionless), could not be converted to required input units of deg (angle)"
s1._input_units_allow_dimensionless = True
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = ScaleDegrees(2)
s2._input_units_allow_dimensionless = False
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(10, 10 * u.arcsec)
assert_quantity_allclose(out[0], 20 * u.one)
assert_quantity_allclose(out[1], 20 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10, 10)
assert exc.value.args[0] == "TestModel: Units of input 'x1', (dimensionless), could not be converted to required input units of deg (angle)"
def test_compound_return_units():
"""
Test that return_units on the first model in the chain is respected for the
input to the second.
"""
class PassModel(Model):
inputs = ('x', 'y')
outputs = ('x', 'y')
@property
def input_units(self):
""" Input units. """
return {'x': u.deg, 'y': u.deg}
@property
def return_units(self):
""" Output units. """
return {'x': u.deg, 'y': u.deg}
def evaluate(self, x, y):
return x.value, y.value
cs = Pix2Sky_TAN() | PassModel()
assert_quantity_allclose(cs(0*u.deg, 0*u.deg), (0, 90)*u.deg)
|
a22f3d34d1b76527421c3eee3c33d947aa5bf2f318738da16c7b67e4ac80a1d7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
from copy import deepcopy
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from ...utils import minversion
from ..core import Model, ModelDefinitionError
from ..parameters import Parameter
from ..models import (Const1D, Shift, Scale, Rotation2D, Gaussian1D,
Gaussian2D, Polynomial1D, Polynomial2D,
Chebyshev2D, Legendre2D, Chebyshev1D, Legendre1D,
AffineTransformation2D, Identity, Mapping,
Tabular1D)
try:
import scipy
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
HAS_SCIPY_14 = HAS_SCIPY and minversion(scipy, "0.14")
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_class_arithmetic_1d(expr, result):
# Const1D is perhaps the simplest model to test basic arithmetic with.
# TODO: Should define more tests later on for more complicated
# combinations of models
S = expr(Const1D, Const1D)
assert issubclass(S, Model)
assert S.n_inputs == 1
assert S.n_outputs == 1
# Initialize an instance of the model, providing values for the two
# "amplitude" parameters
s = S(2, 3)
# It shouldn't matter what input we evaluate on since this is a constant
# function
out = s(0)
assert out == result
assert isinstance(out, float)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
different raise a value error
"""
with pytest.raises(ValueError):
s = expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, Model)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_mixed_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from an
expression of one model class with one model instance (and vice-versa).
"""
S1 = expr(Const1D, Const1D(3))
S2 = expr(Const1D(2), Const1D)
for cls in (S1, S2):
assert issubclass(cls, Model)
assert cls.n_inputs == 1
assert cls.n_outputs == 1
    # Requires values for both amplitudes even though one of them has a
    # default
# TODO: We may wish to fix that eventually, so that if a parameter has a
# default it doesn't *have* to be given in the init
s1 = S1(2, 3)
s2 = S2(2, 3)
for out in (s1(0), s2(0)):
assert out == result
assert isinstance(out, float)
def test_simple_two_model_class_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift | Scale # First shift then scale
assert issubclass(S1, Model)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
s1 = S1(2, 3) # Shift by 2 and scale by 3
assert s1(1) == 9.0
S2 = Scale | Shift # First scale then shift
assert issubclass(S2, Model)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
s2 = S2(2, 3) # Scale by 2 then shift by 3
assert s2(1) == 5.0
# Test with array inputs
assert_array_equal(s2([1, 2, 3]), [5.0, 7.0, 9.0])
def test_simple_two_model_class_compose_2d():
"""
A simple example consisting of two rotations.
"""
R = Rotation2D | Rotation2D
assert issubclass(R, Model)
assert R.n_inputs == 2
assert R.n_outputs == 2
r1 = R(45, 45) # Rotate twice by 45 degrees
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = R(90, 90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
R2 = R | R
r3 = R2(45, 45, 45, 45)
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels() == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels() == 3
g5 = g3 | g2
assert g5.n_submodels() == 5
g7 = g5 / g2
assert g7.n_submodels() == 7
# make sure it works as class method
p = Polynomial1D + Polynomial1D
assert p.n_submodels() == 2
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
    # For the purposes of this test it doesn't matter a great deal what
    # model(s) are used in the expression
G = Gaussian1D
G2 = Gaussian2D
M = G + G
assert M._format_expression() == '[0] + [1]'
M = G + G + G
assert M._format_expression() == '[0] + [1] + [2]'
M = G + G * G
assert M._format_expression() == '[0] + [1] * [2]'
M = G * G + G
assert M._format_expression() == '[0] * [1] + [2]'
M = G + G * G + G
assert M._format_expression() == '[0] + [1] * [2] + [3]'
M = (G + G) * (G + G)
assert M._format_expression() == '([0] + [1]) * ([2] + [3])'
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == '[0] * [1] + [2] * [3]'
M = G ** G
assert M._format_expression() == '[0] ** [1]'
M = G + G ** G
assert M._format_expression() == '[0] + [1] ** [2]'
M = (G + G) ** G
assert M._format_expression() == '([0] + [1]) ** [2]'
M = G + G | G
assert M._format_expression() == '[0] + [1] | [2]'
M = G + (G | G)
assert M._format_expression() == '[0] + ([1] | [2])'
M = G & G | G2
assert M._format_expression() == '[0] & [1] | [2]'
M = G & (G | G)
assert M._format_expression() == '[0] & ([1] | [2])'
def test_indexing_on_class():
"""
Test indexing on compound model class objects, including cases where the
submodels are classes, as well as instances, or both.
"""
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
M = Gaussian1D + Const1D
assert M[0] is Gaussian1D
assert M[1] is Const1D
assert M['Gaussian1D'] is M[0]
assert M['Const1D'] is M[1]
M = Gaussian1D + p
assert M[0] is Gaussian1D
assert isinstance(M['p'], Polynomial1D)
m = g + p
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Polynomial1D)
assert isinstance(m['g'], Gaussian1D)
assert isinstance(m['p'], Polynomial1D)
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
# TODO: It would be good if there were an easier way to interrogate a compound
# model class for what expression it represents. Not sure what that would look
# like though.
def test_slicing_on_class():
"""
Test slicing a simple compound model class using integers.
"""
A = Const1D.rename('A')
B = Const1D.rename('B')
C = Const1D.rename('C')
D = Const1D.rename('D')
E = Const1D.rename('E')
F = Const1D.rename('F')
M = A + B - C * D / E ** F
assert M[0:1] is A
# This test will also check that the correct parameter names are generated
# for each slice (fairly trivial in this case since all the submodels have
# the same parameter, but if any corner cases are found that aren't covered
# by this test we can do something different...)
assert M[0:1].param_names == ('amplitude',)
# This looks goofy but if you slice by name to the sub-model of the same
# name it should just return that model, logically.
assert M['A':'A'] is A
assert M['A':'A'].param_names == ('amplitude',)
assert M[5:6] is F
assert M[5:6].param_names == ('amplitude',)
assert M['F':'F'] is F
assert M['F':'F'].param_names == ('amplitude',)
# 1 + 2
assert M[:2](1, 2)(0) == 3
assert M[:2].param_names == ('amplitude_0', 'amplitude_1')
assert M[:'B'](1, 2)(0) == 3
assert M[:'B'].param_names == ('amplitude_0', 'amplitude_1')
# 2 - 3
assert M[1:3](2, 3)(0) == -1
assert M[1:3].param_names == ('amplitude_1', 'amplitude_2')
assert M['B':'C'](2, 3)(0) == -1
assert M['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
# 3 * 4
assert M[2:4](3, 4)(0) == 12
assert M[2:4].param_names == ('amplitude_2', 'amplitude_3')
assert M['C':'D'](3, 4)(0) == 12
assert M['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
# 4 / 5
assert M[3:5](4, 5)(0) == 0.8
assert M[3:5].param_names == ('amplitude_3', 'amplitude_4')
assert M['D':'E'](4, 5)(0) == 0.8
assert M['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
# 5 ** 6
assert M[4:6](5, 6)(0) == 15625
assert M[4:6].param_names == ('amplitude_4', 'amplitude_5')
assert M['E':'F'](5, 6)(0) == 15625
assert M['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
def test_slicing_on_instance():
"""
Test slicing a simple compound model class using integers.
"""
A = Const1D.rename('A')
B = Const1D.rename('B')
C = Const1D.rename('C')
D = Const1D.rename('D')
E = Const1D.rename('E')
F = Const1D.rename('F')
M = A + B - C * D / E ** F
m = M(1, 2, 3, 4, 5, 6)
assert isinstance(m[0:1], A)
assert isinstance(m['A':'A'], A)
assert isinstance(m[5:6], F)
assert isinstance(m['F':'F'], F)
# 1 + 2
assert m[:'B'](0) == 3
assert m[:'B'].param_names == ('amplitude_0', 'amplitude_1')
assert np.all(m[:'B'].parameters == [1, 2])
# 2 - 3
assert m['B':'C'](0) == -1
assert m['B':'C'].param_names == ('amplitude_1', 'amplitude_2')
assert np.all(m['B':'C'].parameters == [2, 3])
# 3 * 4
assert m['C':'D'](0) == 12
assert m['C':'D'].param_names == ('amplitude_2', 'amplitude_3')
assert np.all(m['C':'D'].parameters == [3, 4])
# 4 / 5
assert m['D':'E'](0) == 0.8
assert m['D':'E'].param_names == ('amplitude_3', 'amplitude_4')
assert np.all(m['D':'E'].parameters == [4, 5])
# 5 ** 6
assert m['E':'F'](0) == 15625
assert m['E':'F'].param_names == ('amplitude_4', 'amplitude_5')
assert np.all(m['E':'F'].parameters == [5, 6])
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
M = Gaussian1D + Const1D
m = M(1, 0, 0.1, 2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert isinstance(m['Gaussian1D'], Gaussian1D)
assert isinstance(m['Const1D'], Const1D)
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
m = g + p
assert m[0].name == 'g'
assert m[1].name == 'p'
assert m['g'].name == 'g'
assert m['p'].name == 'p'
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Ensure this did *not* modify the original models we used as templates
assert p.c0 == 0
assert p.c1 == 0
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
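    # the inverse chain runs in reverse order: Rotation2D(-90), then
    # Scale(1/2) & Scale(1/3), then Shift(-2) & Shift(-3)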
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
@pytest.mark.parametrize('model', [
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
with pytest.raises(NotImplementedError):
model.inverse
def test_mapping_basic_permutations():
"""
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
RS = Rotation2D | Mapping((1, 0))
x_prime, y_prime = RS(90)(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
M = Rotation2D & Scale
m = M(90, 2)
x, y, z = m(1, 2, 3)
MS = M | Mapping((2, 0, 1))
ms = MS(90, 2)
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
RS = Rotation2D & Scale
# Rotates 2 of the coordinates and scales the third--then rotates on a
# different axis and scales on the axis of rotation. No physical meaning
    # here, just a simple test.
M = RS | Mapping([2, 0, 1]) | RS
m = M(12.1, 13.2, 14.3, 15.4)
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
# Same test case but using class composition
TestModel = ident1 & Shift | Rotation2D
model = TestModel(offset_1=1, angle_2=90)
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_slicing_on_instances_2():
"""
More slicing tests.
Regression test for https://github.com/embray/astropy/pull/10
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Rotation2D(3, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a & model_b) | model_c | (model_d & model_e)
with pytest.raises(ModelDefinitionError):
# The slice can't actually be taken since the resulting model cannot be
# evaluated
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
with pytest.raises(ModelDefinitionError):
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
with pytest.raises(ModelDefinitionError):
assert m[-4:4].submodel_names == ('b', 'c', 'd')
with pytest.raises(ModelDefinitionError):
assert m[-4:-2].submodel_names == ('b', 'c')
def test_slicing_on_instances_3():
"""
Like `test_slicing_on_instances_2` but uses a compound model that does not
have any invalid slices due to the resulting model being invalid
(originally test_slicing_on_instances_2 passed without any
ModelDefinitionErrors being raised, but that was before we prevented
invalid models from being created).
"""
model_a = Shift(1, name='a')
model_b = Shift(2, name='b')
model_c = Gaussian1D(3, 0, 0.1, name='c')
model_d = Scale(2, name='d')
model_e = Scale(3, name='e')
m = (model_a + model_b) | model_c | (model_d + model_e)
assert m[1:].submodel_names == ('b', 'c', 'd', 'e')
assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e')
assert m['c':'d'].submodel_names == ('c', 'd')
assert m[1:2].name == 'b'
assert m[2:7].submodel_names == ('c', 'd', 'e')
with pytest.raises(IndexError):
m['x']
with pytest.raises(IndexError):
m['a': 'r']
assert m[-4:4].submodel_names == ('b', 'c', 'd')
assert m[-4:-2].submodel_names == ('b', 'c')
def test_slicing_on_instance_with_parameterless_model():
"""
Regression test to fix an issue where the indices attached to parameter
names on a compound model were not handled properly when one or more
submodels have no parameters. This was especially evident in slicing.
"""
p2 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
p1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
mapping = Mapping((0, 1, 0, 1))
offx = Shift(-2, name='x_translation')
offy = Shift(-1, name='y_translation')
aff = AffineTransformation2D(matrix=[[1, 2], [3, 4]], name='rotation')
model = mapping | (p1 & p2) | (offx & offy) | aff
assert model.param_names == ('c0_0_1', 'c1_0_1', 'c0_1_1',
'c0_0_2', 'c1_0_2', 'c0_1_2',
'offset_3', 'offset_4',
'matrix_5', 'translation_5')
assert model(1, 2) == (23.0, 53.0)
m = model[3:]
assert m.param_names == ('offset_3', 'offset_4', 'matrix_5',
'translation_5')
assert m(1, 2) == (1.0, 1.0)
def test_compound_model_with_nonstandard_broadcasting():
"""
Ensure that the ``standard_broadcasting`` flag is properly propagated when
creating compound models.
See the commit message for the commit in which this was added for more
details.
"""
offx = Shift(1)
offy = Shift(2)
rot = AffineTransformation2D([[0, -1], [1, 0]])
m = (offx & offy) | rot
x, y = m(0, 0)
assert x == -2
assert y == 1
# make sure conversion back to scalars is working properly
assert isinstance(x, float)
assert isinstance(y, float)
x, y = m([0, 1, 2], [0, 1, 2])
assert np.all(x == [-2, -3, -4])
assert np.all(y == [1, 2, 3])
def test_compound_model_classify_attributes():
"""
Regression test for an issue raised here:
https://github.com/astropy/astropy/pull/3231#discussion_r22221123
The issue is that part of the `help` implementation calls a utility
function called `inspect.classify_class_attrs`, which was leading to an
infinite recursion.
This is a useful test in its own right just in that it tests that compound
models can be introspected in some useful way without crashing--this works
as sort of a test of its somewhat complicated internal state management.
This test does not check any of the results of
`~inspect.classify_class_attrs`, though it might be useful to at some
point.
"""
inspect.classify_class_attrs(Gaussian1D + Gaussian1D)
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D | Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D + Gaussian1D
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
@pytest.mark.parametrize('model',
[Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) +
Gaussian1D(fixed={'mean': True}),
(_ConstraintsTestA + _ConstraintsTestB)()])
def test_inherit_constraints(model):
"""
Various tests for copying of constraint values between compound models and
their members.
There are two versions of this test: One where a compound model is created
from two model instances, and another where a compound model is created
from two model classes that have default constraints set on some of their
parameters.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
# We have to copy the model before modifying it, otherwise the test fails
# if it is run twice in a row, because the state of the model instance
# would be preserved from one run to the next.
model = deepcopy(model)
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert 'stddev_0' in model.bounds
assert model.bounds['stddev_0'] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert 'mean_0' in model.fixed
assert model.fixed['mean_0'] is True
assert model.mean_0.fixed is True
assert 'mean_1' in model.fixed
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model[0].stddev.bounds = (0, 0.4)
assert model.bounds['stddev_0'] == (0, 0.4)
assert model.stddev_0.bounds == (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds['stddev'] == (0, 0.4)
model[0].bounds['stddev'] = (0.1, 0.5)
assert model.bounds['stddev_0'] == (0.1, 0.5)
assert model.stddev_0.bounds == (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds['stddev'] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.fixed['mean_1'] is False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
assert model[1].fixed['mean'] is False
model[1].fixed['mean'] = True
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
assert model[1].mean.fixed is True
assert model[1].fixed['mean'] is True
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
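    # the custom inverse propagates: model2.inverse is
    # model1.inverse | shift.inverse, i.e. poly followed by Shift(-1)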
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
with pytest.raises(NotImplementedError):
(shift + model1).inverse
with pytest.raises(NotImplementedError):
(model1 & poly).inverse
@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2),
Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
# has to be defined at module level since pickling doesn't work right (in
# general) for classes defined in functions
class _TestPickleModel(Gaussian1D + Gaussian1D):
pass
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model class
GG = Gaussian1D + Gaussian1D
GG2 = pickle.loads(pickle.dumps(GG))
assert GG.param_names == GG2.param_names
assert GG.__name__ == GG2.__name__
# Test that it works, or at least evaluates successfully
assert GG()(0.12345) == GG2()(0.12345)
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
# Test pickling a concrete class
p = pickle.dumps(_TestPickleModel, protocol=0)
# Note: This is very dependent on the specific protocol, but the point of
# this test is that the "concrete" model is pickled in a very simple way
# that only specifies the module and class name, and is unpickled by
# re-importing the class from the module in which it was defined. This
# should still work for concrete subclasses of compound model classes that
# were dynamically generated through an expression
exp = b'castropy.modeling.tests.test_compound\n_TestPickleModel\np0\n.'
# When testing against the expected value we drop the memo length field
# at the end, which may differ between runs
assert p[:p.rfind(b'p')] == exp[:exp.rfind(b'p')]
assert pickle.loads(p) is _TestPickleModel
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
assert(m(1) == 4)
offx.offset = 42
assert(m(1) == 4)
m.factor_1 = 100
assert(m(1) == 200)
m2 = m | offx
assert(m2(1) == 242)
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m._submodel_names == ('None_0', 'None_1')
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M"
assert m1.name == "M1"
@pytest.mark.skipif("not HAS_SCIPY_14")
def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19],
bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
    x = np.arange(12).reshape((3, 4))
    # Create a compound model that does not execute Tabular.__call__
    # but rather Tabular.evaluate, and is followed by a Rotation2D,
    # which checks the exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2
|
bd9048383b5f17d3704be5fd9fc2971d62e9f635a2c0e9fd5134af1d15805ff6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import cos, sin
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from .. import models
from ...wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(0, 0), (40, -20.56), (21.5, 45.9)])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
    The rotation matrix definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
                'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1 + c1*c2*s3)],
                                 [(c3*s2), (c2), (s2*s3)],
                                 [(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3 - c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
model = models.EulerAngleRotation(23.4, 12.2, 34, axes_order)
mat = model._create_matrix(phi, theta, psi, axes_order)
    assert_allclose(mat.T, matrices[axes_order])
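# A minimal sketch (pure numpy, illustrative; not part of the test suite) of
# where the reference matrices come from: each can be composed from the
# elementary rotations named by ``axes_order``, e.g. for 'zxz',
# R = Rz(phi) @ Rx(theta) @ Rz(psi).
def _example_compose_zxz(phi, theta, psi):
    def rot_z(a):
        return np.array([[np.cos(a), -np.sin(a), 0.],
                         [np.sin(a), np.cos(a), 0.],
                         [0., 0., 1.]])
    def rot_x(a):
        return np.array([[1., 0., 0.],
                         [0., np.cos(a), -np.sin(a)],
                         [0., np.sin(a), np.cos(a)]])
    return rot_z(phi) @ rot_x(theta) @ rot_z(psi)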
|
04e24dfc2de52d80ac2dccc1f589093c38668795ad587aed2573aaf510273f7b | # Various tests of models not related to evaluation, fitting, or parameters
import pytest
from ...tests.helper import assert_quantity_allclose
from ... import units as u
from astropy.modeling.models import Mapping, Pix2Sky_TAN, Gaussian1D
from astropy.modeling import models
from astropy.modeling.core import _ModelMeta
def test_gaussian1d_bounding_box():
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
bbox = g.bounding_box
assert_quantity_allclose(bbox[0], 2.835 * u.m)
assert_quantity_allclose(bbox[1], 3.165 * u.m)
def test_gaussian1d_n_models():
g = Gaussian1D(
amplitude=[1 * u.J, 2. * u.J],
mean=[1 * u.m, 5000 * u.AA],
stddev=[0.1 * u.m, 100 * u.AA],
n_models=2)
assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.] * u.J)
assert_quantity_allclose(
g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J)
# FIXME: The following doesn't work as np.asanyarray doesn't work with a
# list of quantity objects.
# assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]),
# [ 0.99501248, 1.990025] * u.J)
"""
Test the "rules" of model units.
"""
def test_quantity_call():
"""
    Test that models constructed with Quantities must be called with Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g(10 * u.m)
with pytest.raises(u.UnitsError):
g(10)
def test_no_quantity_call():
"""
    Test that models not constructed with Quantities can be called without Quantities.
"""
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert isinstance(g, Gaussian1D)
g(10)
def test_default_parameters():
# Test that calling with a quantity works when one of the parameters
# defaults to dimensionless
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm)
assert isinstance(g, Gaussian1D)
g(10*u.m)
def test_uses_quantity():
"""
    Test that ``uses_quantity`` tracks whether parameters are Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
assert g.uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert not g.uses_quantity
g.mean = 3 * u.m
assert g.uses_quantity
def test_uses_quantity_compound():
"""
    Test that ``uses_quantity`` propagates to compound models.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
assert (g | g2).uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
comp = g | g2
assert not (comp).uses_quantity
def test_uses_quantity_no_param():
comp = Mapping((0, 1)) | Pix2Sky_TAN()
assert comp.uses_quantity
def _allmodels():
allmodels = []
for name in dir(models):
model = getattr(models, name)
if type(model) is _ModelMeta:
try:
m = model()
            except Exception:
                continue
            allmodels.append(m)
return allmodels
@pytest.mark.parametrize("m", _allmodels())
def test_read_only(m):
"""
    Test that the following attributes are read-only:
    input_units
return_units
input_units_allow_dimensionless
input_units_strict
"""
with pytest.raises(AttributeError):
m.input_units = {}
with pytest.raises(AttributeError):
m.return_units = {}
with pytest.raises(AttributeError):
m.input_units_allow_dimensionless = {}
with pytest.raises(AttributeError):
m.input_units_strict = {}
|
bda8cb3d5c3960310d01a8729ad7590c7389313fda2dc39d97a3de9297d2b4a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
from warnings import warn
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import numpy as np
from ... import config as _config
from ... import units as u
from ...table import Table, QTable
from ...utils.data import get_pkg_data_filename, clear_download_cache
from ... import utils
from ...utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'http://maia.usno.navy.mil/ser7/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
class IERSStaleWarning(AstropyWarning):
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
conf = Conf()
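# A minimal usage sketch (illustrative): configuration items can be changed
# globally, or temporarily through the ``set_temp`` context manager.
def _example_iers_configuration():
    # Use a longer download timeout; the original value is restored on exit.
    with conf.set_temp('remote_timeout', 60.0):
        return conf.remote_timeout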
class IERSRangeError(IndexError):
"""
Any error for when dates are outside of the valid range for IERS
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or Time
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
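        Notes
        -----
        As a worked example, ``jd1 = 2450000.5`` with ``jd2 = 0.25`` yields
        ``mjd = 50000.0`` and ``utc = 0.25``.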
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : Quantity with angle units
x component of CIP correction for the requested times
D_y : Quantity with angle units
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'],
self.dcip_source if return_status else None)
def pm_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, float array, or Time object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : Quantity with angle units
x component of polar motion for the requested times
PM_y : Quantity with angle units
y component of polar motion for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'],
self.pm_source if return_status else None)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
raise IERSRangeError('(some) times are outside of range covered '
'by IERS table.')
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
self._refresh_table_as_needed(mjd)
        # For the typical format a match will always be found (since MJD are
        # integer); hence it is important to define which side we are on.
        # This ensures self['MJD'][i-1] <= mjd < self['MJD'][i]
i = np.searchsorted(self['MJD'].value, mjd, side='right')
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == 'UT1_UTC':
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# http://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
from astropy.time import Time
try:
return self._time_now
except Exception:
return Time.now()
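# A minimal sketch (plain numpy arrays, illustrative; not part of the public
# API) of the interpolation scheme implemented in ``IERS._interpolate``:
# locate the bracketing rows with ``np.searchsorted``, then interpolate
# linearly, removing any integer (leap-second) jump from the difference.
# Unlike the method above, ``mjd`` here may simply be fractional.
def _example_interpolate_ut1_utc(table_mjd, table_ut1_utc, mjd):
    i = np.searchsorted(table_mjd, mjd, side='right')
    i1 = np.clip(i, 1, len(table_mjd) - 1)
    i0 = i1 - 1
    d_val = table_ut1_utc[i1] - table_ut1_utc[i0]
    d_val = d_val - np.round(d_val)  # remove a possible 1 s leap-second jump
    frac = (mjd - table_mjd[i0]) / (table_mjd[i1] - table_mjd[i0])
    return table_ut1_utc[i0] + frac * d_val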
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See http://maia.usno.navy.mil/
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL``. See ``iers.__doc__`` for instructions on how to use
it in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[~iers_a['UT1_UTC_A'].mask &
~iers_a['PolPMFlag_A'].mask]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Run np.where on the data from the table columns, since in numpy 1.9
# it otherwise returns an only partially initialized column.
table['UT1_UTC'] = np.where(table['UT1_UTC_B'].mask,
table['UT1_UTC_A'].data,
table['UT1_UTC_B'].data)
# Ensure the unit is correct, for later column conversion to Quantity.
table['UT1_UTC'].unit = table['UT1_UTC_A'].unit
table['UT1Flag'] = np.where(table['UT1_UTC_B'].mask,
table['UT1Flag_A'].data,
'B')
# Repeat for polar motions.
table['PM_x'] = np.where(table['PM_X_B'].mask,
table['PM_x_A'].data,
table['PM_X_B'].data)
table['PM_x'].unit = table['PM_x_A'].unit
table['PM_y'] = np.where(table['PM_Y_B'].mask,
table['PM_y_A'].data,
table['PM_Y_B'].data)
table['PM_y'].unit = table['PM_y_A'].unit
table['PolPMFlag'] = np.where(table['PM_X_B'].mask,
table['PolPMFlag_A'].data,
'B')
table['dX_2000A'] = np.where(table['dX_2000A_B'].mask,
table['dX_2000A_A'].data,
table['dX_2000A_B'].data)
table['dX_2000A'].unit = table['dX_2000A_A'].unit
table['dY_2000A'] = np.where(table['dY_2000A_B'].mask,
table['dY_2000A_A'].data,
table['dY_2000A_B'].data)
table['dY_2000A'].unit = table['dY_2000A_A'].unit
table['NutFlag'] = np.where(table['dX_2000A_B'].mask,
table['NutFlag_A'].data,
'B')
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
is_predictive = (table['UT1Flag_A'] == 'P') | (table['PolPMFlag_A'] == 'P')
table.meta['predictive_index'] = np.min(np.flatnonzero(is_predictive))
table.meta['predictive_mjd'] = table['MJD'][table.meta['predictive_index']]
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
# Read in as a regular Table, including possible masked columns.
# Columns will be filled and converted to Quantity in cls.__init__.
iers_a = Table.read(file, format='cds', readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
# Fill any masked values, and convert to a QTable.
return cls(table.filled())
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self['UT1Flag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == 'I'] = FROM_IERS_A
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self['NutFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == 'I'] = FROM_IERS_A
source[nutflag == 'P'] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self['PolPMFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == 'I'] = FROM_IERS_A
source[pmflag == 'P'] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see http://www.iers.org/
Notes
-----
    If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
# Read in as a regular Table, including possible masked columns.
# Columns will be filled and converted to Quantity in cls.__init__.
iers_b = Table.read(file, format='cds', readme=readme,
data_start=data_start)
return cls(iers_b.filled())
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
        is older than ``astropy.utils.iers.conf.auto_max_age`` days
        (or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance with IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS.open()
return cls.iers_table
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get('data_url') == conf.iers_auto_url:
return cls.iers_table
try:
filename = download_file(conf.iers_auto_url, cache=True)
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning('failed to download {}, using local IERS-B: {}'
.format(conf.iers_auto_url, str(err))))
cls.iers_table = IERS.open()
return cls.iers_table
cls.iers_table = cls.read(file=filename)
cls.iers_table.meta['data_url'] = str(conf.iers_auto_url)
return cls.iers_table
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta['predictive_mjd']
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None
else np.finfo(float).max)
if (max_input_mjd > predictive_mjd and
self.time_now.mjd - predictive_mjd > auto_max_age):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta['predictive_index']
predictive_mjd = self.meta['predictive_mjd']
# Update table in place if necessary
auto_max_age = (conf.auto_max_age if conf.auto_max_age is not None
else np.finfo(float).max)
        # If auto_max_age is smaller than the IERS update time then repeated
        # downloads may occur without getting updated values (giving an
        # IERSStaleWarning).
if auto_max_age < 10:
raise ValueError('IERS auto_max_age configuration value must be larger than 10 days')
if (max_input_mjd > predictive_mjd and
now_mjd - predictive_mjd > auto_max_age):
# Get the latest version
try:
clear_download_cache(conf.iers_auto_url)
filename = download_file(conf.iers_auto_url, cache=True)
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning('failed to download {}: {}.\nA coordinate or time-related '
'calculation might be compromised or fail because the dates are '
'not covered by the available IERS file. See the '
'"IERS data access" section of the astropy documentation '
'for additional information on working offline.'
.format(conf.iers_auto_url, str(err))))
return
new_table = self.__class__.read(file=filename)
# New table has new values?
if new_table['MJD'][-1] > self['MJD'][-1]:
                # Replace the current values from the first predictive index
                # through the end of the current table. This replacement is
                # much faster than deleting all rows and then using add_row
                # for the whole duration.
new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right')
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi:new_fpi + n_replace]
# Sanity check for continuity
if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d:
raise ValueError('unexpected gap in MJD when refreshing IERS table')
# Now add new rows in place
for row in new_table[new_fpi + n_replace:]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(IERSStaleWarning(
'IERS_Auto predictive values are older than {} days but downloading '
'the latest table did not find newer values'.format(conf.auto_max_age)))
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table['MJD'][~table['UT1_UTC_B'].mask]
i0 = np.searchsorted(iers_b['MJD'].value, mjd_b[0], side='left')
i1 = np.searchsorted(iers_b['MJD'].value, mjd_b[-1], side='right')
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not np.allclose(table['MJD'][:n_iers_b], iers_b['MJD'].value):
raise ValueError('unexpected mismatch when copying '
'IERS-B values into IERS-A table.')
# Finally do the overwrite
table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC'].value
table['PM_X_B'][:n_iers_b] = iers_b['PM_x'].value
table['PM_Y_B'][:n_iers_b] = iers_b['PM_y'].value
return table
# By default, the IERS class reads the IERS-B table.
IERS.read = IERS_B.read
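# A minimal usage sketch (illustrative): open the auto-updating table and
# interpolate UT1-UTC for a two-part julian date.
def _example_iers_auto_usage():
    iers_tab = IERS_Auto.open()
    return iers_tab.ut1_utc(2458000.5, 0.0)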
|
139147385ac92e3dc8b1de3c4e36db14246c5208f523c589599ed94c12e056bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Some might be indirectly tested already in ``astropy.io.fits.tests``.
"""
import io
import numpy as np
import pytest
from ..diff import diff_values, report_diff_values, where_not_allclose
from ...table import Table
@pytest.mark.parametrize('a', [np.nan, np.inf, 1.11, 1, 'a'])
def test_diff_values_false(a):
assert not diff_values(a, a)
@pytest.mark.parametrize(
('a', 'b'),
[(np.inf, np.nan), (1.11, 1.1), (1, 2), (1, 'a'), ('a', 'b')])
def test_diff_values_true(a, b):
assert diff_values(a, b)
def test_float_comparison():
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/21
"""
f = io.StringIO()
a = np.float32(0.029751372)
b = np.float32(0.029751368)
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
# This test doesn't care about what the exact output is, just that it
# did show a difference in their text representations
assert 'a>' in out
assert 'b>' in out
def test_diff_types():
"""
Regression test for https://github.com/astropy/astropy/issues/4122
"""
f = io.StringIO()
a = 1.0
b = '1.0'
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (" (float) a> 1.0\n"
" (str) b> '1.0'\n"
" ? + +\n")
def test_diff_numeric_scalar_types():
""" Test comparison of different numeric scalar types. """
f = io.StringIO()
assert not report_diff_values(1.0, 1, fileobj=f)
out = f.getvalue()
assert out == ' (float) a> 1.0\n (int) b> 1\n'
def test_array_comparison():
"""
Test diff-ing two arrays.
"""
f = io.StringIO()
a = np.arange(9).reshape(3, 3)
b = a + 1
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' at [0, 0]:\n'
' a> 0\n'
' b> 1\n'
' at [0, 1]:\n'
' a> 1\n'
' b> 2\n'
' at [0, 2]:\n'
' a> 2\n'
' b> 3\n'
' ...and at 6 more indices.\n')
def test_diff_shaped_array_comparison():
"""
Test diff-ing two differently shaped arrays.
"""
f = io.StringIO()
a = np.empty((1, 2, 3))
identical = report_diff_values(a, a[0], fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' Different array shapes:\n'
' a> (1, 2, 3)\n'
' ? ---\n'
' b> (2, 3)\n')
def test_tablediff():
"""
Test diff-ing two simple Table objects.
"""
a = Table.read("""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.0
M82 2012-10-29 16.2 15.2
M101 2012-10-31 15.1 15.5""", format='ascii')
b = Table.read("""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.5
M82 2012-10-29 16.2 15.2
M101 2012-10-30 15.1 15.5
NEW 2018-05-08 nan 9.0""", format='ascii')
f = io.StringIO()
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert out == (' name obs_date mag_b mag_v\n'
' ---- ---------- ----- -----\n'
' a> M31 2012-01-02 17.0 16.0\n'
' ? ^\n'
' b> M31 2012-01-02 17.0 16.5\n'
' ? ^\n'
' M82 2012-10-29 16.2 15.2\n'
' a> M101 2012-10-31 15.1 15.5\n'
' ? ^\n'
' b> M101 2012-10-30 15.1 15.5\n'
' ? ^\n'
' b> NEW 2018-05-08 nan 9.0\n')
# Identical
assert report_diff_values(a, a, fileobj=f)
@pytest.mark.parametrize('kwargs', [{}, {'atol': 0, 'rtol': 0}])
def test_where_not_allclose(kwargs):
a = np.array([1, np.nan, np.inf, 4.5])
b = np.array([1, np.inf, np.nan, 4.6])
assert where_not_allclose(a, b, **kwargs) == ([3], )
assert len(where_not_allclose(a, a, **kwargs)[0]) == 0
|
c0d2ce9bf1c9c44d8fc2b4a3923b4f89e92b6dbd46aaff66fa2675513be9061a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import hashlib
import os
import pathlib
import sys
import tempfile
import urllib.request
import urllib.error
import pytest
from ..data import (_get_download_cache_locs, CacheMissingWarning,
get_pkg_data_filename, get_readable_fileobj)
from ...tests.helper import raises, catch_warnings
TESTURL = 'http://www.astropy.org'
# General file object function
try:
import bz2 # noqa
except ImportError:
HAS_BZ2 = False
else:
HAS_BZ2 = True
try:
import lzma # noqa
except ImportError:
HAS_XZ = False
else:
HAS_XZ = True
@pytest.mark.remote_data(source='astropy')
def test_download_nocache():
from ..data import download_file
fnout = download_file(TESTURL)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source='astropy')
def test_download_parallel():
from ..data import conf, download_files_in_parallel
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = 'intersphinx/README'
try:
fnout = download_files_in_parallel([main_url, main_url + fileloc])
except urllib.error.URLError: # Use mirror if timed out
fnout = download_files_in_parallel([mirror_url, mirror_url + fileloc])
assert all([os.path.isfile(f) for f in fnout]), fnout
# NOTE: Does not need remote data.
def test_download_mirror_cache():
import pathlib
import shelve
from ..data import _find_pkg_data_path, download_file, get_cached_urls
main_url = pathlib.Path(
_find_pkg_data_path(os.path.join('data', 'dataurl'))).as_uri()
mirror_url = pathlib.Path(
_find_pkg_data_path(os.path.join('data', 'dataurl_mirror'))).as_uri()
main_file = main_url + '/index.html'
mirror_file = mirror_url + '/index.html'
# "Download" files by rerouting URLs to local URIs.
download_file(main_file, cache=True)
download_file(mirror_file, cache=True)
# Now test that download_file looks in mirror's cache before download.
# https://github.com/astropy/astropy/issues/6982
dldir, urlmapfn = _get_download_cache_locs()
with shelve.open(urlmapfn) as url2hash:
del url2hash[main_file]
# Comparing hash should be good enough?
# This test also tests for "assert TESTURL in get_cached_urls()".
c_urls = get_cached_urls()
assert ((download_file(main_file, cache=True) ==
download_file(mirror_file, cache=True)) and
(mirror_file in c_urls) and (main_file not in c_urls))
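# A minimal sketch (illustrative; ``_get_download_cache_locs`` is private) of
# the cache layout relied on above: a shelve file maps each URL to its cached
# local file. This is essentially what ``get_cached_urls`` returns.
def _example_list_cached_urls():
    import shelve
    dldir, urlmapfn = _get_download_cache_locs()
    with shelve.open(urlmapfn) as url2hash:
        return list(url2hash.keys())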
@pytest.mark.remote_data(source='astropy')
def test_download_noprogress():
from ..data import download_file
fnout = download_file(TESTURL, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source='astropy')
def test_download_cache():
from ..data import download_file, clear_download_cache
download_dir = _get_download_cache_locs()[0]
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
fnout = download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache('http://this_was_never_downloaded_before.com')
# Make sure lockdir was released
lockdir = os.path.join(download_dir, 'lock')
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.remote_data(source='astropy')
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding='utf-8') as page:
assert page.read().find('Astropy') > -1
@pytest.mark.remote_data(source='astropy')
def test_find_by_hash():
from ..data import clear_download_cache
with get_readable_fileobj(TESTURL, encoding="binary", cache=True) as page:
hash = hashlib.md5(page.read())
hashstr = 'hash/' + hash.hexdigest()
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(hashstr[5:])
assert not os.path.isfile(fnout)
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.remote_data(source='astropy')
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename('kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli')
# Package data functions
@pytest.mark.parametrize(('filename'), ['local.dat', 'local.dat.gz',
'local.dat.bz2', 'local.dat.xz'])
def test_local_data_obj(filename):
from ..data import get_pkg_data_fileobj
if (not HAS_BZ2 and 'bz2' in filename) or (not HAS_XZ and 'xz' in filename):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert ' format files are not supported' in str(e)
else:
with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f:
f.readline()
assert f.read().rstrip() == b'CONTENT'
@pytest.fixture(params=['invalid.dat.bz2', 'invalid.dat.gz'])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b'BZhinvalid'
gz_content = b'\x1f\x8b\x08invalid'
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith('.bz2'):
contents = bz_content
elif filename.endswith('.gz'):
contents = gz_content
else:
contents = 'invalid'
datafile.write(contents, mode='wb')
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith('.bz2')
is_xz = bad_compressed.endswith('.xz')
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_XZ and is_xz):
with pytest.raises(ValueError) as e:
with get_readable_fileobj(bad_compressed, encoding='binary') as f:
f.read()
assert ' format files are not supported' in str(e)
else:
with get_readable_fileobj(bad_compressed, encoding='binary') as f:
assert f.read().rstrip().endswith(b'invalid')
def test_local_data_name():
fnout = get_pkg_data_filename('data/local.dat')
assert os.path.isfile(fnout) and fnout.endswith('local.dat')
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), 'data')
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert filename == os.path.join(data_dir, 'test_package', 'data',
'foo.txt')
finally:
sys.path.pop(0)
@raises(RuntimeError)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
get_pkg_data_filename('../../../data/README.rst')
def test_compute_hash(tmpdir):
from ..data import compute_hash
rands = b'1234567890abcdefghijklmnopqrstuvwxyz'
filename = tmpdir.join('tmp.dat').strpath
with open(filename, 'wb') as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
from ..data import get_pkg_data_fileobj, get_pkg_data_contents
with get_pkg_data_fileobj('data/local.dat') as f:
contents1 = f.read()
contents2 = get_pkg_data_contents('data/local.dat')
assert contents1 == contents2
@pytest.mark.remote_data(source='astropy')
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
from .. import data
from ...config import paths
# needed for testing the *real* lock at the end
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
# better yet, set the configuration to make sure the temp files are deleted
data.conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo')
monkeypatch.delenv(str('XDG_CONFIG_HOME'))
monkeypatch.setenv(str('XDG_CACHE_HOME'), 'bar')
monkeypatch.delenv(str('XDG_CACHE_HOME'))
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto):
raise OSError
monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir()
# first try with cache
with catch_warnings(CacheMissingWarning) as w:
fnout = data.download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
assert len(w) > 1
w1 = w.pop(0)
w2 = w.pop(0)
assert w1.category == CacheMissingWarning
assert 'Remote data cache could not be accessed' in w1.message.args[0]
assert w2.category == CacheMissingWarning
assert 'File downloaded to temporary location' in w2.message.args[0]
assert fnout == w2.message.args[1]
    # clearing the cache should be a no-op that doesn't affect fnout
with catch_warnings(CacheMissingWarning) as w:
data.clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
data._deltemps()
assert not os.path.isfile(fnout)
assert len(w) > 0
w3 = w.pop()
assert w3.category == data.CacheMissingWarning
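    # "inacessable" is left misspelled below so that the assertion matches
    # the warning text actually emitted at runtime.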
assert 'Not clearing data cache - cache inacessable' in str(w3.message)
# now try with no cache
with catch_warnings(CacheMissingWarning) as w:
fnnocache = data.download_file(TESTURL, cache=False)
with open(fnnocache, 'rb') as page:
assert page.read().decode('utf-8').find('Astropy') > -1
    # no warnings should be raised for the fileobj because the cache is unnecessary
assert len(w) == 0
# lockdir determined above as the *real* lockdir, not the temp one
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.parametrize(('filename'), [
'unicode.txt',
'unicode.txt.gz',
pytest.param('unicode.txt.bz2', marks=pytest.mark.xfail(not HAS_BZ2, reason='no bz2 support')),
pytest.param('unicode.txt.xz', marks=pytest.mark.xfail(not HAS_XZ, reason='no lzma support'))])
def test_read_unicode(filename):
from ..data import get_pkg_data_contents
contents = get_pkg_data_contents(os.path.join('data', filename), encoding='utf-8')
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join('data', filename), encoding='binary')
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:])
def test_compressed_stream():
import base64
gzipped_data = (b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA==")
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b''
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding='binary') as f:
f.readline()
assert f.read().rstrip() == b'CONTENT'
@pytest.mark.remote_data(source='astropy')
def test_invalid_location_download():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
from ..data import download_file
with pytest.raises(urllib.error.URLError):
download_file('http://www.astropy.org/nonexistentfile')
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
from ..data import download_file
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file('http://astropy.org/nonexistentfile')
@pytest.mark.remote_data(source='astropy')
def test_is_url_in_cache():
from ..data import download_file, is_url_in_cache
assert not is_url_in_cache('http://astropy.org/nonexistentfile')
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
local_filename = get_pkg_data_filename(os.path.join('data', 'local.dat'))
url = 'file://' + urllib.request.pathname2url(local_filename)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, 'tempdir', str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url):
pass
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(get_pkg_data_filename(os.path.join('data', 'local.dat')))
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == ('This file is used in the test_local_data_* '
'testing functions\nCONTENT')
|
4137fdd76d7fba3a7514a1f3eed03a944ed35d2efebd23bdb673e422f641f4ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# namedtuple is needed for find_mod_objs so it can have a non-local module
from collections import namedtuple
import pytest
from .. import introspection
from ..introspection import (find_current_module, find_mod_objs,
isinstancemethod, minversion)
def test_pkg_finder():
"""
Tests that the `find_current_module` function works. Note that
this also implicitly tests compat.misc._patched_getmodule
"""
mod1 = 'astropy.utils.introspection'
mod2 = 'astropy.utils.tests.test_introspection'
mod3 = 'astropy.utils.tests.test_introspection'
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
def test_find_current_mod():
from sys import getrecursionlimit
thismodnm = __name__
assert find_current_module(0) is introspection
assert find_current_module(1).__name__ == thismodnm
assert find_current_module(getrecursionlimit() + 1) is None
assert find_current_module(0, True).__name__ == thismodnm
assert find_current_module(0, [introspection]).__name__ == thismodnm
assert find_current_module(0, ['astropy.utils.introspection']).__name__ == thismodnm
with pytest.raises(ImportError):
find_current_module(0, ['faddfdsasewrweriopunjlfiurrhujnkflgwhu'])
def test_find_mod_objs():
lnms, fqns, objs = find_mod_objs('astropy')
# this import is after the above call intentionally to make sure
# find_mod_objs properly imports astropy on its own
import astropy
# just check for astropy.test ... other things might be added, so we
# shouldn't check that it's the only thing
assert 'test' in lnms
assert astropy.test in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=False)
assert 'namedtuple' in lnms
assert 'collections.namedtuple' in fqns
assert namedtuple in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=True)
assert 'namedtuple' not in lnms
assert 'collections.namedtuple' not in fqns
assert namedtuple not in objs
def test_minversion():
from types import ModuleType
test_module = ModuleType(str("test_module"))
test_module.__version__ = '0.12.2'
good_versions = ['0.12', '0.12.1', '0.12.0.dev', '0.12dev']
bad_versions = ['1', '1.2rc1']
for version in good_versions:
assert minversion(test_module, version)
for version in bad_versions:
assert not minversion(test_module, version)
|
8a8f9c6df9c1580ac1a2b6e9429b93abed2f0ebc8082acdebfaed9616ed6a4a5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion
__all__ = ['NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
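# A minimal usage sketch (hypothetical call site): these flags let callers
# branch on the installed numpy version instead of re-checking it inline.
def _example_version_guard():
    if NUMPY_LT_1_14:
        return 'apply pre-1.14 workaround'
    return 'use modern numpy behaviour'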
|
4301940c14f4599e66ed68bf5564d296b125194bc0db0837747457e8e03372c1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import urllib.request
import pytest
import numpy as np
from ....tests.helper import assert_quantity_allclose, catch_warnings
from .. import iers
from .... import units as u
from ....table import QTable
from ....time import Time, TimeDelta
from ....utils.exceptions import AstropyWarning
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt')
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
def test_simple(self):
iers.IERS.close()
assert iers.IERS.iers_table is None
iers_tab = iers.IERS.open()
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
assert iers_tab['UT1_UTC'].unit is u.second
assert iers_tab['PM_x'].unit is u.arcsecond
assert iers_tab['PM_y'].unit is u.arcsecond
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert ut1_utc.unit is u.second
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS.close()
iers.IERS.open(iers.IERS_B_FILE)
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
iers.IERS.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open("file:" + urllib.request.pathname2url(IERS_A_EXCERPT))
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
# Test the IERS A reader. It is also a regression tests that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert iers_tab['UT1_UTC'].unit is u.second
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert iers_tab['dX_2000A'].unit is u.marcsec
assert iers_tab['dY_2000A'].unit is u.marcsec
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert iers_tab['PM_x'].unit is u.arcsecond
assert iers_tab['PM_y'].unit is u.arcsecond
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
        dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif(str('not HAS_IERS_A'))
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-02-30-test')
self.iers_a_file_2 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-04-30-test')
self.iers_a_url_1 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_1))
self.iers_a_url_2 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_2))
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
@pytest.mark.remote_data
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
with catch_warnings(iers.IERSStaleWarning) as warns:
with pytest.raises(ValueError) as err:
dat.ut1_utc(Time(60000, format='mjd').jd)
        assert 'interpolating from IERS_Auto using predictive values' in str(err.value)
assert len(warns) == 1
assert 'IERS_Auto predictive values are older' in str(warns[0].message)
        # With return_status=True we only get the warning, not the exception
with catch_warnings(iers.IERSStaleWarning) as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
assert 'IERS_Auto predictive values are older' in str(warns[0].message)
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
with catch_warnings(iers.IERSStaleWarning) as warns:
dat.ut1_utc(Time(60000, format='mjd').jd)
assert not warns
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
# configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
import collections.abc
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
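# Illustrative examples (these follow directly from BOM_LIST above):
#     >>> match_utf8('UTF-8'), match_utf8('u8'), match_utf8('utf-16')
#     (True, True, False)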
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
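# Illustrative sketch: each key above corresponds to a ConfigObj keyword
# argument with the same default, e.g.:
#     >>> cfg = ConfigObj(['k = a, b'], list_values=False)
#     >>> cfg['k']
#     'a, b'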
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
    def build(self, o):
        # dispatch on the AST node's class name (build_List, build_Const, ...)
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
# this is supposed to be safe
import ast
return ast.literal_eval(s)
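# Illustrative sketch: unrepr round-trips Python literals stored as strings:
#     >>> unrepr("['a', 1, {'b': 2}]")
#     ['a', 1, {'b': 2}]
#     >>> unrepr('')
#     ''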
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
    An error occurred whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
        if self._cookie not in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
class TemplateInterpolation(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
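# Illustrative sketch: with the default interpolation='configparser', a
# value like '%(home)s/logs' expands from the key 'home'; with
# interpolation='template' the equivalent forms are '$home/logs' or
# '${home}/logs', and '$$' yields a literal '$' ('home' is a made-up key):
#     >>> c = ConfigObj(['home = /opt/app', 'logs = %(home)s/logs'])
#     >>> c['logs']
#     '/opt/app/logs'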
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
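    An illustrative sketch of the ordering (values parse as strings):
    >>> cfg = ConfigObj(['a = 1', '[sect]', 'b = 2'])
    >>> cfg.scalars, cfg.sections
    (['a'], ['sect'])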
"""
def __setstate__(self, state):
dict.update(self, state[0])
self.__dict__.update(state[1])
def __reduce__(self):
state = (dict(self), self.__dict__)
return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
"""
* parent is the section above
* depth is the depth level of this section
* main is the main ConfigObj
* indict is a dictionary to initialise the section with
"""
if indict is None:
indict = {}
dict.__init__(self)
# used for nesting level *and* interpolation
self.parent = parent
# used for the interpolation attribute
self.main = main
# level of nesting depth of this Section
self.depth = depth
# purely for information
self.name = name
#
self._initialise()
# we do this explicitly so that __setitem__ is used properly
# (rather than just passing to ``dict.__init__``)
for entry, value in indict.items():
self[entry] = value
def _initialise(self):
# the sequence of scalar values in this Section
self.scalars = []
# the sequence of sections in this Section
self.sections = []
# for comments :-)
self.comments = {}
self.inline_comments = {}
# the configspec
self.configspec = None
# for defaults
self.defaults = []
self.default_values = {}
self.extra_values = []
self._created = False
def _interpolate(self, key, value):
try:
# do we already have an interpolation engine?
engine = self._interpolation_engine
except AttributeError:
# not yet: first time running _interpolate(), so pick the engine
name = self.main.interpolation
if name == True: # note that "if name:" would be incorrect here
# backwards-compatibility: interpolation=True means use default
name = DEFAULT_INTERPOLATION
name = name.lower() # so that "Template", "template", etc. all work
class_ = interpolation_engines.get(name, None)
if class_ is None:
# invalid value for self.main.interpolation
self.main.interpolation = False
return value
else:
# save reference to engine so we don't have to do this again
engine = self._interpolation_engine = class_(self)
# let the engine do the actual work
return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation:
if isinstance(val, str):
return self._interpolate(key, val)
if isinstance(val, list):
def _check(entry):
if isinstance(entry, str):
return self._interpolate(key, entry)
return entry
new = [_check(entry) for entry in val]
if new != val:
return new
return val
def __setitem__(self, key, value, unrepr=False):
"""
Correctly set a value.
Making dictionary values Section instances.
(We have to special case 'Section' instances - which are also dicts)
Keys must be strings.
Values need only be strings (or lists of strings) if
``main.stringify`` is set.
``unrepr`` must be set when setting a value to a dictionary, without
creating a new sub-section.
"""
if not isinstance(key, str):
raise ValueError('The key "%s" is not a string.' % key)
# add the comment
if key not in self.comments:
self.comments[key] = []
self.inline_comments[key] = ''
# remove the entry from defaults
if key in self.defaults:
self.defaults.remove(key)
#
if isinstance(value, Section):
if key not in self:
self.sections.append(key)
dict.__setitem__(self, key, value)
        elif isinstance(value, collections.abc.Mapping) and not unrepr:
# First create the new depth level,
# then create the section
if key not in self:
self.sections.append(key)
new_depth = self.depth + 1
dict.__setitem__(
self,
key,
Section(
self,
new_depth,
self.main,
indict=value,
name=key))
else:
if key not in self:
self.scalars.append(key)
if not self.main.stringify:
if isinstance(value, str):
pass
elif isinstance(value, (list, tuple)):
for entry in value:
if not isinstance(entry, str):
raise TypeError('Value is not a string "%s".' % entry)
else:
raise TypeError('Value is not a string "%s".' % value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
        dict.__delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, default=MISSING):
"""
        D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
        Leaves other attributes alone:
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(list(self.items()))
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(list(self.values()))
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
            if (key in self and isinstance(self[key], collections.abc.Mapping) and
                    isinstance(val, collections.abc.Mapping)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
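        An illustrative sketch:
        >>> c = ConfigObj(['k1 = v'])
        >>> c.rename('k1', 'key1')
        >>> c
        ConfigObj({'key1': 'v'})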
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
        If the function raises an exception, raise the error
unless ``raise_errors=False``, in which case set the return value to
``False``.
        Any unrecognised keyword arguments you pass to walk will be passed on
        to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
        and then it recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, str):
                    # raise KeyError so the except clause below turns it
                    # into the ValueError with a helpful message
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
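    # For illustration, the line "  [[production]]  # comment" yields the
    # groups ('  ', '[[', 'production', ']]', '# comment').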
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognised option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, str):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, str) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
            # set the errors attribute: a list of the error
            # instances encountered while parsing
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('%s({%s})' % (self.__class__.__name__,
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
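        An illustrative sketch (bytes input, no encoding specified):
        >>> c = ConfigObj()
        >>> c._handle_bom(BOM_UTF8 + b'key = value')
        ['key = value']
        >>> c.BOM, c.encoding
        (True, None)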
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, str):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
                        # BOM discovered - no need to remove it before
                        # decoding (note: self.BOM is not set on this path)
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, bytes) or not line.startswith(BOM):
                # line doesn't start with this BOM, or it isn't a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, str):
return infile.splitlines(True)
elif isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, bytes) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
        Decode infile to unicode, using the specified encoding.
        If it is a string, it also needs converting to a list.
"""
if isinstance(infile, str):
return infile.splitlines(True)
if isinstance(infile, bytes):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, bytes):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, bytes) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, str):
            # intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
                    # the new section is a child of the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
                                if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
        The error will have occurred at ``cur_index``.
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
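        An illustrative sketch of these rules:
        >>> c = ConfigObj()
        >>> c._quote('simple')
        'simple'
        >>> c._quote('with, comma')
        '"with, comma"'
        >>> c._quote(['a', 'b'])
        'a, b'
        >>> c._quote([])
        ','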
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, str):
if self.stringify:
                # intentionally 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
def _handle_value(self, value):
"""
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
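        An illustrative sketch:
        >>> c = ConfigObj()
        >>> c._handle_value('1, 2, 3 # a comment')
        (['1', '2', '3'], '# a comment')
        >>> c._handle_value(',')
        ([], None)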
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
        # NOTE: there is no error handling from here if the regex
# is wrong: then incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
        # NOTE: the calls to self._quote here handle non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
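    # For illustration, ConfigObj()._write_marker('    ', 2, 'server', '')
    # returns '    [[server]]' ('server' is an arbitrary example name).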
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename
>>> a.filename = 'test.ini'
>>> a.write()
>>> a.filename = filename
>>> a == ConfigObj('test.ini', raise_errors=True)
True
>>> import os
>>> os.remove('test.ini')
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, Section):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *break* UTF16, as each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if not output.endswith(newline):
output += newline
if isinstance(output, bytes):
output_bytes = output
else:
output_bytes = output.encode(self.encoding or
self.default_encoding or
'ascii')
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output_bytes = BOM_UTF8 + output_bytes
if outfile is not None:
outfile.write(output_bytes)
else:
with open(self.filename, 'wb') as h:
h.write(output_bytes)
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is the default) then instead
of marking a fail with ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from .validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
if (entry not in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing),
# then we can't return False; we need to preserve errors.
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, str):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
"""
A simple validator.
Can be used to check that all members expected are present.
To use it, provide a configspec with all your members in (the value given
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), collections.abc.Mapping):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
"""*A programming language is a medium of expression.* - Paul Graham"""
|
326a9ca9f10cb489aae403b078ae23518d34e3037cf150922f58473e61a962c7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from matplotlib import rcParams
from ... import units as u
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
__all__ = ['CoordinateHelper']
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid='ignore'):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
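# For example, values wrap into the interval [coord_wrap - 360, coord_wrap):
#
# >>> wrap_angle_at(np.array([-10., 370.]), 360.)
# array([350.,  10.])
# >>> wrap_angle_at(np.array([190.]), 180.)
# array([-170.])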
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(self, parent_axes=None, parent_map=None, transform=None,
coord_index=None, coord_type='scalar', coord_unit=None,
coord_wrap=None, frame=None, format_unit=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self.format_unit = format_unit
self.frame = frame
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
self.ticks.display_minor_ticks(False)
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
#
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
lines_to_patches_linestyle = {'-': 'solid',
'--': 'dashed',
'-.': 'dashdot',
':': 'dotted',
'none': 'none',
'None': 'none',
' ': 'none',
'': 'none'}
self.grid_lines_kwargs = {'visible': False,
'facecolor': 'none',
'edgecolor': rcParams['grid.color'],
'linestyle': lines_to_patches_linestyle[rcParams['grid.linestyle']],
'linewidth': rcParams['grid.linewidth'],
'alpha': rcParams.get('grid.alpha', 1.0),
'transform': self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type='lines', **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended.
"""
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs['visible']:
if not draw_grid:
self.grid_lines_kwargs['visible'] = False
else:
self.grid_lines_kwargs['visible'] = True
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported '
'for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ['longitude', 'latitude']:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(unit=self.coord_unit,
format_unit=self.format_unit)
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or Formatter
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def format_coord(self, value, format='auto'):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathText. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == 'longitude':
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
unit : :class:`~astropy.units.Unit`
The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def set_ticks(self, values=None, spacing=None, number=None, size=None,
width=None, color=None, alpha=None, exclude_overlapping=False):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : int, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points.
width : float, optional
The width of the ticks in points.
color : str or tuple, optional
A valid Matplotlib color for the ticks.
alpha : float, optional
The alpha value (transparency) for the ticks.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap each other.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(self, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
kwargs
Keyword arguments are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
The visibility of tick labels. Setting as ``False`` will hide this
coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group('grid lines')
self._update_ticks()
if self.grid_lines_kwargs['visible']:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('grid lines')
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox, ticks_locs):
renderer.open_group('ticks')
self.ticks.draw(renderer, ticks_locs)
self.ticklabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox)
renderer.close_group('ticks')
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, ticks_locs, visible_ticks):
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=ticks_locs,
visible_ticks=visible_ticks)
renderer.close_group('axis labels')
def _update_ticks(self):
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index])
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# http://matplotlib.org/users/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
with np.errstate(invalid='ignore'):
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid='ignore'):
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1,
w2, tick_angle, ticks='minor')
# format tick labels, add to scene
text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2,
tick_angle, ticks='major'):
if self.coord_type == 'longitude':
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack([tick_world_coordinates_values,
tick_world_coordinates_values + 360])
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid='ignore'):
intersections = np.hstack([np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0]])
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == 'major':
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
self.lbl_world.append(world)
else:
self.ticks.add_minor(minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self.minor_frequency = frequency
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples)
else:
xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset]))
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == 'scalar':
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _update_grid_contour(self):
if hasattr(self, '_grid'):
for line in self._grid.collections:
line.remove()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x, y, field = self.transform.get_coord_slices(xmin, xmax, ymin, ymax, 200, 200)
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
field = field[self.coord_index]
# tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid)
# Replace wraps by NaN
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values))
else:
self._grid = None
|
944a272cb155f5347ec67cb68345101a3463a13e5e18f92ba30d1e41fcb2e8dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from ... import units as u
from ...units import UnitsError
from ...coordinates import Angle
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit('custom_degree', represents=u.degree,
format={'generic': '\xb0',
'latex': r'^\circ',
'unicode': '°'}),
u.arcmin: u.def_unit('custom_arcmin', represents=u.arcmin,
format={'generic': "'",
'latex': r'^\prime',
'unicode': '′'}),
u.arcsec: u.def_unit('custom_arcsec', represents=u.arcsec,
format={'generic': '"',
'latex': r'^{\prime\prime}',
'unicode': '″'}),
u.hourangle: u.def_unit('custom_hourangle', represents=u.hourangle,
format={'generic': 'h',
'latex': r'^\mathrm{h}',
'unicode': r'$\mathregular{^h}$'})}
class BaseFormatterLocator:
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None):
if (values, number, spacing).count(None) < 2:
raise ValueError("At most one of values/number/spacing can be specifed")
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError("value should be in units compatible with "
"coordinate units ({0}) but found {1}".format(self._unit, values.unit))
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
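# Worked example (numbers chosen for illustration): with a major spacing
# of 10 deg and frequency=5, minor_spacing is 2. For value_min=0 and
# value_max=30, _locate_values returns the multiples 0..15; every fifth
# entry (0, 5, 10, 15, i.e. the major ticks at 0, 10, 20, 30 deg) is
# deleted, and the remainder come back as 2, 4, 6, 8, 12, ... deg.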
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
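# For example, _locate_values(12.3, 58.7, 10.) returns array([2, 3, 4, 5]),
# i.e. the integer multiples of the spacing that fall inside the range,
# giving ticks at 20, 30, 40 and 50 once multiplied by the spacing.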
class AngleFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, decimal=None, format_unit=None, show_decimal_unit=True):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
self._unit = unit
self._format_unit = format_unit or unit
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(values=values, number=number, spacing=spacing,
format=format)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (not isinstance(spacing, u.Quantity) or
spacing.unit.physical_type != 'angle'):
raise TypeError("spacing should be an astropy.units.Quantity "
"instance with units of angle")
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
else:
raise ValueError("Invalid format: {0}".format(value))
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
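# For illustration, a few formats and the settings the parsing above
# implies (precision counts the digits after the '.'):
#
# 'dd:mm' -> sexagesimal degrees, fields=2, base_spacing = 1 arcmin
# 'hh:mm:ss.ss' -> sexagesimal hourangle, fields=3, precision=2,
# base_spacing = 0.01 arcsec * 15
# 'd.dd' -> decimal degrees, fields=1, precision=2,
# base_spacing = 0.01 deg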
@property
def base_spacing(self):
if self.decimal:
spacing = self._format_unit / (10. ** self._precision)
else:
if self._fields == 1:
spacing = 1. * u.degree
elif self._fields == 2:
spacing = 1. * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1. * u.arcsec
else:
spacing = u.arcsec / (10. ** self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
def formatter(self, values, spacing, format='auto'):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) gives '15.0', not '15'. We format using
# 10 decimal places by default before stripping the zeros since
# this corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
spacing = spacing.to_value(unit)
fields = 0
precision = len("{0:.10f}".format(spacing).replace('0', ' ').strip().split('.', 1)[1])
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == 'latex' or (format == 'auto' and rcParams['text.usetex'])
if decimal:
# At the moment, the Angle class doesn't have a consistent way
# to always convert angles to strings in decimal form with
# symbols for units (instead of e.g 3arcsec). So as a workaround
# we take advantage of the fact that Angle.to_string converts
# the unit to a string manually when decimal=False and the unit
# is not strictly u.degree or u.hourangle
if self.show_decimal_unit:
decimal = False
sep = 'fromunit'
if is_latex:
fmt = 'latex'
else:
if unit is u.hourangle:
fmt = 'unicode'
else:
fmt = None
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = None
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = 'fromunit'
if unit == u.degree:
if is_latex:
fmt = 'latex'
else:
sep = ('\xb0', "'", '"')
fmt = None
else:
if format == 'ascii':
fmt = None
elif is_latex:
fmt = 'latex'
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
fmt = None
angles = Angle(values)
string = angles.to_string(unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt).tolist()
return string
else:
return []
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
if unit is not None:
self._unit = unit
self._format_unit = format_unit or unit
elif spacing is not None:
self._unit = spacing.unit
self._format_unit = format_unit or spacing.unit
elif values is not None:
self._unit = values.unit
self._format_unit = format_unit or values.unit
super().__init__(values=values, number=number, spacing=spacing,
format=format)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith('%'):
raise ValueError("Invalid format: {0}".format(value))
@property
def base_spacing(self):
return self._format_unit / (10. ** self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format='auto'):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith('%'):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
else:
return []
|
a9ab36805f8a8873eebec810f2bdd3fe17efe259b6f977860bc1086a8668e41a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
from ...coordinates import SkyCoord, BaseCoordinateFrame
from ...wcs import WCS
from ...wcs.utils import wcs_to_celestial_frame
from .transforms import (WCSPixel2WorldTransform, WCSWorld2PixelTransform,
CoordinateTransform)
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta, transform_contour_set_inplace
from .frame import EllipticalFrame, RectangularFrame
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances.
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=RectangularFrame,
**kwargs):
super().__init__(fig, rect, **kwargs)
self._bboxes = []
self.frame_class = frame_class
        if transData is not None:
            # The user wants to override the transform for the final
            # data -> pixel mapping
            self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return "%s %s (pixel)" % (x, y)
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
xw = coords[self._x_index].format_coord(world[self._x_index], format='ascii')
yw = coords[self._y_index].format_coord(world[self._y_index], format='ascii')
if self._display_coords_index == 0:
system = "world"
else:
system = "world, overlay {0}".format(self._display_coords_index)
coord_string = "%s %s (%s)" % (xw, yw, system)
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
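    # Note on the handler above: pressing 'w' cycles the status-bar readout
    # through the main world coordinates, each overlay's coordinates, and
    # finally pixel coordinates (index -1), before wrapping back around.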
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.get('origin', 'lower')
if origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
# We don't need to worry since PIL is not installed, so user cannot
# have passed RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
kwargs['origin'] = 'lower'
return super().imshow(X, *args, **kwargs)
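    # Illustrative sketch for the PIL branch above (assumes Pillow is
    # installed; 'image.png' is a hypothetical RGB file): the image is
    # flipped vertically so that origin='lower' matches the FITS convention::
    #
    #     from PIL import Image
    #     rgb = Image.open('image.png')  # origin in the top left
    #     ax.imshow(rgb)                 # drawn with origin='lower'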
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop('transform', None)
        cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
cset = transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop('transform', None)
        cset = super().contourf(*args, **kwargs)
if transform is not None:
            # The transform passed to self.contourf will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
cset = transform_contour_set_inplace(cset, transform)
return cset
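    # Illustrative usage of the two methods above (``data`` and ``wcs_other``
    # are hypothetical: an array and the WCS describing it): the transform is
    # applied to all contour segments in one go rather than per line::
    #
    #     ax.contour(data, transform=ax.get_transform(wcs_other),
    #                levels=[2.5e-5, 5e-5, 1e-4], colors='orange')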
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.data.lon.to_value(coord.coord_unit))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.data.lat.to_value(coord.coord_unit))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
            if 'transform' in kwargs:
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(frame0)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
super().plot(*args, **kwargs)
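    # Illustrative usage (the coordinate values are arbitrary): the SkyCoord
    # is converted to the two positional plot arguments automatically::
    #
    #     from astropy import units as u
    #     from astropy.coordinates import SkyCoord
    #
    #     c = SkyCoord(266 * u.deg, -29 * u.deg)
    #     ax.plot_coord(c, 'o')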
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
        # Here we determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
            # We now explicitly call 'set' to ensure the WCS object is
            # consistent; this only matters if the WCS has been set up by
            # hand. For example, if the user sets a celestial WCS by hand and
            # forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
wcs.wcs.set()
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
self.coords = CoordinatesMap(self, wcs=self.wcs, slice=slices,
transform=transform, coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
if slices is None:
self.slices = ('x', 'y')
self._x_index = 0
self._y_index = 1
else:
self.slices = slices
self._x_index = self.slices.index('x')
self._y_index = self.slices.index('y')
# Common default settings for Rectangular Frame
if self.frame_class is RectangularFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('b')
self.coords[coord_index].set_ticklabel_position('b')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_axislabel_position('l')
self.coords[coord_index].set_ticklabel_position('l')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
# Common default settings for Elliptical Frame
elif self.frame_class is EllipticalFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('h')
self.coords[coord_index].set_ticklabel_position('h')
self.coords[coord_index].set_ticks_position('h')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_ticks_position('c')
self.coords[coord_index].set_axislabel_position('c')
self.coords[coord_index].set_ticklabel_position('c')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
def draw_wcsaxes(self, renderer):
        # Here we need to find out the range of all coordinates, and update
        # the range for each coordinate axis. For now, just assume it covers
        # the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
ticks_locs = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
ticks_locs=ticks_locs[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
ticks_locs=ticks_locs[coord],
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, inframe=False):
        # In Axes.draw, the following code can result in the xlim and ylim
        # values changing, so we need to call it explicitly here to make sure
        # that the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, inframe=inframe)
self._drawn = True
def set_xlabel(self, label, labelpad=1, **kwargs):
self.coords[self._x_index].set_axislabel(label, minpad=labelpad, **kwargs)
def set_ylabel(self, label, labelpad=1, **kwargs):
self.coords[self._y_index].set_axislabel(label, minpad=labelpad, **kwargs)
def get_xlabel(self):
return self.coords[self._x_index].get_axislabel()
def get_ylabel(self):
return self.coords[self._y_index].get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
coords = CoordinatesMap(self, frame, frame_class=self.frame_class)
else:
if coord_meta is None:
coord_meta = get_coord_meta(frame)
transform = self._get_transform_no_transdata(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
    This does not include the transData transformation.
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
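    # Illustrative usage (values are arbitrary): draw a patch whose
    # coordinates are given in the FK5 frame, whatever frame the axes use::
    #
    #     from matplotlib.patches import Circle
    #
    #     p = Circle((60., 50.), radius=20., ec='red', fc='none',
    #                transform=ax.get_transform('fk5'))
    #     ax.add_patch(p)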
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if self.wcs is None and frame != 'pixel':
raise ValueError('No WCS specified, so only pixel coordinates are available')
if isinstance(frame, WCS):
coord_in = wcs_to_celestial_frame(self.wcs)
coord_out = wcs_to_celestial_frame(frame)
if coord_in == coord_out:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
WCSWorld2PixelTransform(frame))
else:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
CoordinateTransform(self.wcs, frame) +
WCSWorld2PixelTransform(frame))
elif frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
return pixel2world + frame
else:
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
if frame == 'world':
return pixel2world
else:
coordinate_transform = CoordinateTransform(self.wcs, frame)
if coordinate_transform.same_frames:
return pixel2world
else:
return pixel2world + CoordinateTransform(self.wcs, frame)
def get_tightbbox(self, renderer):
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
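    # Illustrative usage of the method above (styling values are arbitrary):
    # show the grid for both coordinates with custom appearance::
    #
    #     ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed')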
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
|
18cf0e0b0738a8dafd773988e9ec19875aeef8445e12892a366b1bcb1e7493f2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from ... import units as u
from ...coordinates import BaseCoordinateFrame
__all__ = ['select_step_degree', 'select_step_hour', 'select_step_scalar',
'coord_type_from_ctype', 'transform_contour_set_inplace']
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1. * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_,
minute_limits_,
degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
def select_step_hour(dv):
if dv > 15. * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [15. * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [15. * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_,
minute_limits_,
hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15. * u.arcsec)) * (15. * u.arcsec)
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10. ** (base + steps[imin])
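# Worked example for select_step_scalar above: for dv = 433, log10(433) is
# about 2.636, so base = 2 and frac = 0.636; the closest entry in
# log10([1, 2, 5, 10]) is log10(5) = 0.699, giving a step of
# 10 ** (2 + 0.699) = 500.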
def get_coord_meta(frame):
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (None, None)
coord_meta['unit'] = (u.deg, u.deg)
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError("Unknown frame: {0}".format(initial_frame))
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta['name'] = names[:2]
return coord_meta
def coord_type_from_ctype(ctype):
"""
Determine whether a particular WCS ctype corresponds to an angle or scalar
coordinate.
"""
if ctype[:4] == 'RA--':
return 'longitude', u.hourangle, None
elif ctype[:4] == 'HPLN':
return 'longitude', u.arcsec, 180.
elif ctype[:4] == 'HPLT':
return 'latitude', u.arcsec, None
elif ctype[:4] == 'HGLN':
return 'longitude', None, 180.
elif ctype[1:4] == 'LON' or ctype[2:4] == 'LN':
return 'longitude', None, None
elif ctype[:4] == 'DEC-' or ctype[1:4] == 'LAT' or ctype[2:4] == 'LT':
return 'latitude', None, None
else:
return 'scalar', None, None
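# Illustrative calls for the function above (inputs follow the ctype
# conventions handled by each branch)::
#
#     coord_type_from_ctype('RA--')  # ('longitude', u.hourangle, None)
#     coord_type_from_ctype('HPLT')  # ('latitude', u.arcsec, None)
#     coord_type_from_ctype('FREQ')  # ('scalar', None, None)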
def transform_contour_set_inplace(cset, transform):
"""
Transform a contour set in-place using a specified
:class:`matplotlib.transform.Transform`
Using transforms with the native Matplotlib contour/contourf can be slow if
the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
"""
    # The contours are represented as paths grouped into levels. Each level
    # can have one or more paths. The approach we take here is to stack the
    # vertices of all paths and transform them in one go. The pos_level list
    # helps us keep track of where the set of segments for each overall
    # contour level ends. The pos_segments list helps us keep track of where
    # each segment ends for each contour level.
all_paths = []
pos_level = []
pos_segments = []
for collection in cset.collections:
paths = collection.get_paths()
all_paths.append(paths)
        # The last item in pos isn't needed for np.split; if we keep it, an
        # extra empty array is returned.
pos = np.cumsum([len(x) for x in paths])
pos_segments.append(pos[:-1])
pos_level.append(pos[-1])
# As above the last item isn't needed
pos_level = np.cumsum(pos_level)[:-1]
# Stack all the segments into a single (n, 2) array
    vertices = np.vstack([path.vertices for paths in all_paths for path in paths])
# Transform all coordinates in one go
vertices = transform.transform(vertices)
# Split up into levels again
vertices = np.split(vertices, pos_level)
# Now re-populate the segments in the line collections
for ilevel, vert in enumerate(vertices):
vert = np.split(vert, pos_segments[ilevel])
for iseg, ivert in enumerate(vert):
all_paths[ilevel][iseg].vertices = ivert
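# Minimal standalone sketch of the stack/transform/split bookkeeping above
# (segment lengths are arbitrary): three segments of lengths 2, 3 and 4 are
# stacked, transformed once, then split back into the original segments::
#
#     import numpy as np
#
#     segs = [np.zeros((2, 2)), np.ones((3, 2)), np.full((4, 2), 2.)]
#     pos = np.cumsum([len(s) for s in segs])[:-1]  # [2, 5]
#     stacked = np.vstack(segs)                     # shape (9, 2)
#     stacked = stacked * 2.                        # stand-in for transform
#     segs = np.split(stacked, pos)                 # lengths 2, 3, 4 again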
|
40e50da81df2cf72838205ddec36cfbcf2c1bf8529cf4ae2b077fc96103d0fcc | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
try:
import matplotlib.pyplot as plt
except ImportError:
HAS_PLT = False
else:
HAS_PLT = True
from ... import units as u
from ..units import quantity_support
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_units_errorbar():
pytest.importorskip("matplotlib", minversion="2.2")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(u.UnitConversionError):
plt.plot([105, 210, 315] * u.kg)
plt.clf()
|
fdfaea810831d305e63759f36fb13b1141559cf9bd42f40a4030b3a47ca98036 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_almost_equal
from .... import units as u
from ..utils import (select_step_degree, select_step_hour, select_step_scalar,
coord_type_from_ctype)
from ....tests.helper import (assert_quantity_allclose as
assert_almost_equal_quantity)
def test_select_step_degree():
assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180. * u.deg)
assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45. * u.deg)
assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec)
def test_select_step_hour():
assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3. * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec)
def test_select_step_scalar():
assert_almost_equal(select_step_scalar(33122.), 50000.)
assert_almost_equal(select_step_scalar(433.), 500.)
assert_almost_equal(select_step_scalar(12.3), 10)
assert_almost_equal(select_step_scalar(3.3), 5.)
assert_almost_equal(select_step_scalar(0.66), 0.5)
assert_almost_equal(select_step_scalar(0.0877), 0.1)
assert_almost_equal(select_step_scalar(0.00577), 0.005)
assert_almost_equal(select_step_scalar(0.00022), 0.0002)
assert_almost_equal(select_step_scalar(0.000012), 0.00001)
assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
def test_coord_type_from_ctype():
assert coord_type_from_ctype(' LON') == ('longitude', None, None)
assert coord_type_from_ctype(' LAT') == ('latitude', None, None)
assert coord_type_from_ctype('HPLN') == ('longitude', u.arcsec, 180.)
assert coord_type_from_ctype('HPLT') == ('latitude', u.arcsec, None)
assert coord_type_from_ctype('RA--') == ('longitude', u.hourangle, None)
assert coord_type_from_ctype('DEC-') == ('latitude', None, None)
assert coord_type_from_ctype('spam') == ('scalar', None, None)
|
ee9816f2d066b683426bf50068beeeb8ba8b36452e442584a42e3ce56dfa56fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from matplotlib import rc_context
from .... import units as u
from ....tests.helper import assert_quantity_allclose
from ....units import UnitsError
from ..formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
class TestAngleFormatterLocator:
def test_no_options(self):
fl = AngleFormatterLocator()
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], number=5)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(number=5, spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], number=5, spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
def test_values(self):
fl = AngleFormatterLocator(values=[0.1, 1., 14.] * u.degree)
assert fl.values.to_value(u.degree).tolist() == [0.1, 1., 14.]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [0.1, 1., 14.])
def test_number(self):
fl = AngleFormatterLocator(number=7)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [35., 40., 45., 50., 55.])
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 34.75, 35., 35.25, 35.5, 35.75, 36.])
fl.format = 'dd'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35., 36.])
def test_spacing(self):
with pytest.raises(TypeError) as exc:
AngleFormatterLocator(spacing=3.)
assert exc.value.args[0] == "spacing should be an astropy.units.Quantity instance with units of angle"
fl = AngleFormatterLocator(spacing=3. * u.degree)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3. * u.degree
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [36., 39., 42., 45., 48., 51., 54.])
fl.spacing = 30. * u.arcmin
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 35., 35.5, 36.])
fl.format = 'dd'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35., 36.])
def test_minor_locator(self):
fl = AngleFormatterLocator()
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [36., 37., 38.,
39., 41., 42., 43., 44., 46., 47., 48., 49., 51.,
52., 53., 54.])
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1., 14.] * u.degree
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [])
@pytest.mark.parametrize(('format', 'string'), [('dd', '15\xb0'),
('dd:mm', '15\xb024\''),
('dd:mm:ss', '15\xb023\'32"'),
('dd:mm:ss.s', '15\xb023\'32.0"'),
('dd:mm:ss.ssss', '15\xb023\'32.0316"'),
('hh', '1h'),
('hh:mm', '1h02m'),
('hh:mm:ss', '1h01m34s'),
('hh:mm:ss.s', '1h01m34.1s'),
('hh:mm:ss.ssss', '1h01m34.1354s'),
('d', '15\xb0'),
('d.d', '15.4\xb0'),
('d.dd', '15.39\xb0'),
('d.ddd', '15.392\xb0'),
('m', '924\''),
('m.m', '923.5\''),
('m.mm', '923.53\''),
('s', '55412"'),
('s.s', '55412.0"'),
('s.ss', '55412.03"'),
])
def test_format(self, format, string):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.formatter([15.392231] * u.degree, None, format='ascii')[0] == string
@pytest.mark.parametrize(('separator', 'format', 'string'), [(('deg', "'", '"'), 'dd', '15deg'),
(('deg', "'", '"'), 'dd:mm', '15deg24\''),
(('deg', "'", '"'), 'dd:mm:ss', '15deg23\'32"'),
((':', "-", 's'), 'dd:mm:ss.s', '15:23-32.0s'),
(':', 'dd:mm:ss.s', '15:23:32.0'),
((':', ":", 's'), 'hh', '1:'),
(('-', "-", 's'), 'hh:mm:ss.ssss', '1-01-34.1354s'),
(('d', ":", '"'), 'd', '15\xb0'),
(('d', ":", '"'), 'd.d', '15.4\xb0'),
])
def test_separator(self, separator, format, string):
fl = AngleFormatterLocator(number=5, format=format)
fl.sep = separator
assert fl.formatter([15.392231] * u.degree, None)[0] == string
def test_latex_format(self):
fl = AngleFormatterLocator(number=5, format="dd:mm:ss")
assert fl.formatter([15.392231] * u.degree, None)[0] == '15\xb023\'32"'
with rc_context(rc={'text.usetex': True}):
assert fl.formatter([15.392231] * u.degree, None)[0] == "$15^\\circ23{}^\\prime32{}^{\\prime\\prime}$"
@pytest.mark.parametrize(('format'), ['x.xxx', 'dd.ss', 'dd:ss', 'mdd:mm:ss'])
def test_invalid_formats(self, format):
fl = AngleFormatterLocator(number=5)
with pytest.raises(ValueError) as exc:
fl.format = format
assert exc.value.args[0] == "Invalid format: " + format
@pytest.mark.parametrize(('format', 'base_spacing'), [('dd', 1. * u.deg),
('dd:mm', 1. * u.arcmin),
('dd:mm:ss', 1. * u.arcsec),
('dd:mm:ss.ss', 0.01 * u.arcsec),
('hh', 15. * u.deg),
('hh:mm', 15. * u.arcmin),
('hh:mm:ss', 15. * u.arcsec),
('hh:mm:ss.ss', 0.15 * u.arcsec),
('d', 1. * u.deg),
('d.d', 0.1 * u.deg),
('d.dd', 0.01 * u.deg),
('d.ddd', 0.001 * u.deg),
('m', 1. * u.arcmin),
('m.m', 0.1 * u.arcmin),
('m.mm', 0.01 * u.arcmin),
('s', 1. * u.arcsec),
('s.s', 0.1 * u.arcsec),
('s.ss', 0.01 * u.arcsec),
])
def test_base_spacing(self, format, base_spacing):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = AngleFormatterLocator()
fl.spacing = 0.032 * u.deg
fl.format = 'dd:mm:ss'
assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.)
def test_decimal_values(self):
# Regression test for a bug that meant that the spacing was not
# determined correctly for decimal coordinates
fl = AngleFormatterLocator()
fl.format = 'd.dddd'
assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
[266.9735, 266.9740, 266.9745, 266.9750] * u.deg)
fl = AngleFormatterLocator(decimal=True, format_unit=u.hourangle, number=4)
assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
[17.79825, 17.79830] * u.hourangle)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.arcsec, decimal=True)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[1000., 1200., 1400., 1600., 1800., 2000.] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.degree, decimal=False)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[15., 20., 25., 30., 35.] * u.arcmin)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.hourangle, decimal=False)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[60., 75., 90., 105., 120., 135.] * (15 * u.arcsec))
fl = AngleFormatterLocator(unit=u.arcsec)
fl.format = 'dd:mm:ss'
assert_quantity_allclose(fl.locator(0.9, 1.1)[0], [1] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, spacing=0.2 * u.arcsec)
assert_quantity_allclose(fl.locator(0.3, 0.9)[0], [0.4, 0.6, 0.8] * u.arcsec)
@pytest.mark.parametrize(('spacing', 'string'), [(2 * u.deg, '15\xb0'),
(2 * u.arcmin, '15\xb024\''),
(2 * u.arcsec, '15\xb023\'32"'),
(0.1 * u.arcsec, '15\xb023\'32.0"')])
def test_formatter_no_format(self, spacing, string):
fl = AngleFormatterLocator()
assert fl.formatter([15.392231] * u.degree, spacing)[0] == string
@pytest.mark.parametrize(('format_unit', 'decimal', 'show_decimal_unit', 'spacing', 'ascii', 'latex'),
[(u.degree, False, True, 2 * u.degree, '15\xb0', '$15^\circ$'),
(u.degree, False, True, 2 * u.arcmin, '15\xb024\'', '$15^\circ24{}^\prime$'),
(u.degree, False, True, 2 * u.arcsec, '15\xb023\'32"', '$15^\circ23{}^\prime32{}^{\prime\prime}$'),
(u.degree, False, True, 0.1 * u.arcsec, '15\xb023\'32.0"', '$15^\circ23{}^\prime32.0{}^{\prime\prime}$'),
(u.hourangle, False, True, 15 * u.degree, '1h', '$1^\mathrm{h}$'),
(u.hourangle, False, True, 15 * u.arcmin, '1h02m', '$1^\mathrm{h}02^\mathrm{m}$'),
(u.hourangle, False, True, 15 * u.arcsec, '1h01m34s', '$1^\mathrm{h}01^\mathrm{m}34^\mathrm{s}$'),
(u.hourangle, False, True, 1.5 * u.arcsec, '1h01m34.1s', '$1^\mathrm{h}01^\mathrm{m}34.1^\mathrm{s}$'),
(u.degree, True, True, 15 * u.degree, '15\xb0', '$15\mathrm{^\circ}$'),
(u.degree, True, True, 0.12 * u.degree, '15.39\xb0', '$15.39\mathrm{^\circ}$'),
(u.degree, True, True, 0.0036 * u.arcsec, '15.392231\xb0', '$15.392231\mathrm{^\circ}$'),
(u.arcmin, True, True, 15 * u.degree, '924\'', '$924\mathrm{^\prime}$'),
(u.arcmin, True, True, 0.12 * u.degree, '923.5\'', '$923.5\mathrm{^\prime}$'),
(u.arcmin, True, True, 0.1 * u.arcmin, '923.5\'', '$923.5\mathrm{^\prime}$'),
(u.arcmin, True, True, 0.0002 * u.arcmin, '923.5339\'', '$923.5339\mathrm{^\prime}$'),
(u.arcsec, True, True, 0.01 * u.arcsec, '55412.03"', '$55412.03\mathrm{^{\prime\prime}}$'),
(u.arcsec, True, True, 0.001 * u.arcsec, '55412.032"', '$55412.032\mathrm{^{\prime\prime}}$'),
(u.mas, True, True, 0.001 * u.arcsec, '55412032mas', '$55412032\mathrm{mas}$'),
(u.degree, True, False, 15 * u.degree, '15', '15'),
(u.degree, True, False, 0.12 * u.degree, '15.39', '15.39'),
(u.degree, True, False, 0.0036 * u.arcsec, '15.392231', '15.392231'),
(u.arcmin, True, False, 15 * u.degree, '924', '924'),
(u.arcmin, True, False, 0.12 * u.degree, '923.5', '923.5'),
(u.arcmin, True, False, 0.1 * u.arcmin, '923.5', '923.5'),
(u.arcmin, True, False, 0.0002 * u.arcmin, '923.5339', '923.5339'),
(u.arcsec, True, False, 0.01 * u.arcsec, '55412.03', '55412.03'),
(u.arcsec, True, False, 0.001 * u.arcsec, '55412.032', '55412.032'),
(u.mas, True, False, 0.001 * u.arcsec, '55412032', '55412032'),
# Make sure that specifying None defaults to
# decimal for non-degree or non-hour angles
(u.arcsec, None, True, 0.01 * u.arcsec, '55412.03"', '$55412.03\mathrm{^{\prime\prime}}$')])
def test_formatter_no_format_with_units(self, format_unit, decimal, show_decimal_unit, spacing, ascii, latex):
# Check the formatter works when specifying the default units and
# decimal behavior to use.
fl = AngleFormatterLocator(unit=u.degree, format_unit=format_unit, decimal=decimal, show_decimal_unit=show_decimal_unit)
assert fl.formatter([15.392231] * u.degree, spacing, format='ascii')[0] == ascii
assert fl.formatter([15.392231] * u.degree, spacing, format='latex')[0] == latex
def test_incompatible_unit_decimal(self):
with pytest.raises(UnitsError) as exc:
AngleFormatterLocator(unit=u.arcmin, decimal=False)
assert exc.value.args[0] == 'Units should be degrees or hours when using non-decimal (sexagesimal) mode'
class TestScalarFormatterLocator:
def test_no_options(self):
fl = ScalarFormatterLocator(unit=u.m)
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, number=5)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(number=5, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, number=5, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
def test_values(self):
fl = ScalarFormatterLocator(values=[0.1, 1., 14.] * u.m, unit=u.m)
assert fl.values.value.tolist() == [0.1, 1., 14.]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [0.1, 1., 14.])
def test_number(self):
fl = ScalarFormatterLocator(number=7, unit=u.m)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, np.linspace(36., 54., 10))
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, np.linspace(34.4, 36, 9))
fl.format = 'x'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35., 36.])
def test_spacing(self):
fl = ScalarFormatterLocator(spacing=3. * u.m)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3. * u.m
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [36., 39., 42., 45., 48., 51., 54.])
fl.spacing = 0.5 * u.m
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [34.5, 35., 35.5, 36.])
fl.format = 'x'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35., 36.])
def test_minor_locator(self):
fl = ScalarFormatterLocator(unit=u.m)
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(minor_values.value, [36., 37., 38., 39., 41., 42.,
43., 44., 46., 47., 48., 49., 51., 52., 53., 54.])
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1., 14.] * u.m
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [])
@pytest.mark.parametrize(('format', 'string'), [('x', '15'),
('x.x', '15.4'),
('x.xx', '15.39'),
('x.xxx', '15.392'),
('%g', '15.3922'),
('%f', '15.392231'),
('%.2f', '15.39'),
('%.3f', '15.392')])
def test_format(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(('format', 'string'), [('x', '1539'),
('x.x', '1539.2'),
('x.xx', '1539.22'),
('x.xxx', '1539.223')])
def test_format_unit(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
fl.format_unit = u.cm
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(('format'), ['dd', 'dd:mm', 'xx:mm', 'mx.xxx'])
def test_invalid_formats(self, format):
fl = ScalarFormatterLocator(number=5, unit=u.m)
with pytest.raises(ValueError) as exc:
fl.format = format
assert exc.value.args[0] == "Invalid format: " + format
@pytest.mark.parametrize(('format', 'base_spacing'), [('x', 1. * u.m),
('x.x', 0.1 * u.m),
('x.xxx', 0.001 * u.m)])
def test_base_spacing(self, format, base_spacing):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = ScalarFormatterLocator(unit=u.m)
fl.spacing = 0.032 * u.m
fl.format = 'x.xx'
assert_almost_equal(fl.spacing.to_value(u.m), 0.03)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[1000., 1200., 1400., 1600., 1800., 2000.] * u.cm)
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
fl.format = 'x.x'
assert_quantity_allclose(fl.locator(1, 19)[0], [10] * u.cm)
|
09a0514441375ae817b38fdc7c62fdda647c0bfecb40bee2210359fce6c7f091 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from ..core import WCSAxes
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent
from ....wcs import WCS
from ....coordinates import FK5
from ....time import Time
from ....tests.image_tests import ignore_matplotlibrc
from .test_images import BaseImageTests
class TestDisplayWorldCoordinate(BaseImageTests):
@ignore_matplotlibrc
def test_overlay_coords(self, tmpdir):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test1.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '0\xb029\'45" -0\xb029\'20" (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
event3 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event3.key, guiEvent=event3)
# Test that it still displays world coords when there are no overlay coords
string_world2 = ax._display_world_coords(0.523412, 0.518311)
assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)'
overlay = ax.get_coords_overlay('fk5')
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test2.png').strpath)
event4 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event4.key, guiEvent=event4)
# Test that it displays the overlay world coordinates
string_world3 = ax._display_world_coords(0.523412, 0.518311)
assert string_world3 == '267.176\xb0 -28\xb045\'56" (world, overlay 1)'
overlay = ax.get_coords_overlay(FK5())
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test3.png').strpath)
event5 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event4.key, guiEvent=event4)
# Test that it displays the overlay world coordinates
string_world4 = ax._display_world_coords(0.523412, 0.518311)
assert string_world4 == '267.176\xb0 -28\xb045\'56" (world, overlay 2)'
overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test4.png').strpath)
event6 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event6.key, guiEvent=event6)
# Test that it displays the overlay world coordinates
string_world5 = ax._display_world_coords(0.523412, 0.518311)
assert string_world5 == '267.652\xb0 -28\xb046\'23" (world, overlay 3)'
@ignore_matplotlibrc
def test_cube_coords(self, tmpdir):
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x'))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '2563 3h26m52.0s (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
|
147ec4460ec50ec1a27646a7319c827d9c2322dfb064eb75f7eca21b86de57bf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from unittest.mock import patch
import pytest
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.io import fits
from ..core import WCSAxes
from .... import units as u
from ....tests.image_tests import ignore_matplotlibrc
ROOT = os.path.dirname(__file__)
MSX_HEADER = fits.Header.fromtextfile(os.path.join(ROOT, 'data', 'msx_header'))
@ignore_matplotlibrc
def test_getaxislabel():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
ax.coords[0].set_axislabel("X")
ax.coords[1].set_axislabel("Y")
assert ax.coords[0].get_axislabel() == "X"
assert ax.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
return ax
def assert_label_draw(ax, x_label, y_label):
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
with patch.object(ax.coords[0].axislabels, 'set_position') as pos1:
with patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
ax.figure.canvas.draw()
assert pos1.call_count == x_label
assert pos2.call_count == y_label
@ignore_matplotlibrc
def test_label_visibility_rules_default(ax):
assert_label_draw(ax, True, True)
@ignore_matplotlibrc
def test_label_visibility_rules_label(ax):
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, False, False)
@ignore_matplotlibrc
def test_label_visibility_rules_ticks(ax):
ax.coords[0].set_axislabel_visibility_rule('ticks')
ax.coords[1].set_axislabel_visibility_rule('ticks')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, False)
@ignore_matplotlibrc
def test_label_visibility_rules_always(ax):
ax.coords[0].set_axislabel_visibility_rule('always')
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, True)
def test_set_separator(tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
fig.add_axes(ax)
# Force a draw which is required for format_coord to work
ax.figure.canvas.draw()
ax.coords[1].set_format_unit('deg')
assert ax.coords[1].format_coord(4) == '4\xb000\'00\"'
ax.coords[1].set_separator((':', ':', ''))
assert ax.coords[1].format_coord(4) == '4:00:00'
ax.coords[1].set_separator('abc')
assert ax.coords[1].format_coord(4) == '4a00b00c'
ax.coords[1].set_separator(None)
assert ax.coords[1].format_coord(4) == '4\xb000\'00\"'
|
9269a6501ce76c419a93537788e9d6cb186bdb947e070353aaf327f0bb3f0276 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle
from matplotlib import rc_context
from .... import units as u
from ....io import fits
from ....wcs import WCS
from ....coordinates import SkyCoord
from ..patches import SphericalCircle
from .. import WCSAxes
from . import datasets
from ....tests.image_tests import IMAGE_REFERENCE_DIR
from ..frame import EllipticalFrame
class BaseImageTests:
@classmethod
def setup_class(cls):
cls._data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
msx_header = os.path.join(cls._data_dir, 'msx_header')
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = os.path.join(cls._data_dir, 'rosat_header')
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = os.path.join(cls._data_dir, '2MASS_k_header')
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = os.path.join(cls._data_dir, 'cube_header')
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = os.path.join(cls._data_dir, 'slice_header')
cls.slice_header = fits.Header.fromtextfile(slice_header)
class TestBasic(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='image_plot.png',
tolerance=0, style={})
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=1.5)
@pytest.mark.parametrize('axisbelow', [True, False, 'line'])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
ax.grid()
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30., 50.), 60., 50., facecolor='green', edgecolor='red')
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='contour_overlay.png',
tolerance=0, style={})
def test_contour_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(hdu_msx.data, transform=ax.get_transform(wcs_msx),
colors='orange', levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='contourf_overlay.png',
tolerance=0, style={})
def test_contourf_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(hdu_msx.data, transform=ax.get_transform(wcs_msx),
levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='overlay_features_image.png',
tolerance=0, style={})
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.25, 0.25, 0.65, 0.65],
projection=WCS(self.msx_header), aspect='equal')
# Change the format of the ticks
ax.coords[0].set_major_formatter('dd:mm:ss')
ax.coords[1].set_major_formatter('dd:mm:ss.ssss')
# Overlay grid on image
ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed')
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords['glon'].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords['glat'].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords['glon'].set_axislabel('Galactic Longitude', minpad=1.6)
ax.coords['glat'].set_axislabel('Galactic Latitude', minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color('red')
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == 'red'
assert ax.coords.frame.get_linewidth() == 2
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='curvlinear_grid_patches_image.png',
tolerance=0, style={})
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.rosat_header), aspect='equal')
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color='black', alpha=1.0, lw=1, linestyle='dashed')
p = Circle((300, 100), radius=40, ec='yellow', fc='none')
ax.add_patch(p)
p = Circle((30., 20.), radius=20., ec='orange', fc='none',
transform=ax.get_transform('world'))
ax.add_patch(p)
p = Circle((60., 50.), radius=20., ec='red', fc='none',
transform=ax.get_transform('fk5'))
ax.add_patch(p)
p = Circle((40., 60.), radius=20., ec='green', fc='none',
transform=ax.get_transform('galactic'))
ax.add_patch(p)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='cube_slice_image.png',
tolerance=0, style={})
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel('Velocity m/s')
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1,
exclude_overlapping=True)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1,
exclude_overlapping=True)
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
ax.coords[2].grid(grid_type='contours', color='red', linestyle='solid')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='cube_slice_image_lonlat.png',
tolerance=0, style={})
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=('x', 'y', 50), aspect='equal')
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type='contours', color='blue', linestyle='solid')
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
ax.plot_coord(c, 'o')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='changed_axis_units.png',
tolerance=0, style={})
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_major_formatter('x.xx')
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel('Velocity km/s')
ax.coords[1].set_ticks(width=1, exclude_overlapping=True)
ax.coords[2].set_ticks(width=1, exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='minor_ticks_image.png',
tolerance=0, style={})
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='ticks_labels.png',
tolerance=0, style={})
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color='blue', alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color='red', alpha=0.9, width=1)
ax.coords[0].set_ticks_position('all')
ax.coords[1].set_ticks_position('all')
ax.coords[0].set_axislabel('X-axis', size=20)
ax.coords[1].set_axislabel('Y-axis', color='green', size=25,
weight='regular', style='normal',
family='cmtt10')
ax.coords[0].set_axislabel_position('t')
ax.coords[1].set_axislabel_position('r')
ax.coords[0].set_ticklabel(color='purple', size=15, alpha=1,
weight='light', style='normal',
family='cmss10')
ax.coords[1].set_ticklabel(color='black', size=18, alpha=0.9,
weight='bold', family='cmr10')
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('r')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='rcparams.png',
tolerance=0, style={})
def test_rcparams(self):
# Test default style (matplotlib.rcParams) for ticks and gridlines
with rc_context({
'xtick.color': 'red',
'xtick.major.size': 20,
'xtick.major.width': 2,
'grid.color': 'blue',
'grid.linestyle': ':',
'grid.linewidth': 1,
'grid.alpha': 0.5}):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.coords[0].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='tick_angles.png',
tolerance=0, style={})
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='tick_angles_non_square_axes.png',
tolerance=0, style={})
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='set_coord_type.png',
tolerance=0, style={})
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6],
projection=WCS(self.msx_header),
aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type('scalar')
ax.coords[1].set_coord_type('scalar')
ax.coords[0].set_major_formatter('x.xxx')
ax.coords[1].set_major_formatter('x.xxx')
ax.coords[0].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='test_ticks_regression_1.png',
tolerance=0, style={})
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5],
projection=wcs, aspect='auto')
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('all')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='test_axislabels_regression.png',
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto')
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[1].ticklabels.set_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_noncelestial_angular(self, tmpdir):
        # Regression test for a bug that occurred when passing a WCS with
        # angular axes and using set_coord_type to set the coordinates to
        # longitude/latitude, but where the WCS wasn't recognized as
        # celestial: in that case the WCS units are not converted to deg, so
        # we can't assume that transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['solar-x', 'solar-y']
wcs.wcs.cunit = ['arcsec', 'arcsec']
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin='lower')
ax.coords[0].set_coord_type('longitude', coord_wrap=180)
ax.coords[1].set_coord_type('latitude')
ax.coords[0].set_major_formatter('s.s')
ax.coords[1].set_major_formatter('s.s')
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color='white', ls='solid')
# Force drawing (needed for format_coord)
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(512, 512) == '513.0 513.0 (world)'
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_patches_distortion(self, tmpdir):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='equal')
# Pixel coordinates
r = Rectangle((30., 50.), 60., 50., edgecolor='green', facecolor='none')
ax.add_patch(r)
# FK5 coordinates
r = Rectangle((266.4, -28.9), 0.3, 0.3, edgecolor='cyan', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
# FK5 coordinates
c = Circle((266.4, -29.1), 0.15, edgecolor='magenta', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(c)
# Pixel coordinates
ax.scatter([40, 100, 130], [30, 130, 60], s=100, edgecolor='red', facecolor=(1, 0, 0, 0.5))
# World coordinates (should not be distorted)
ax.scatter(266.78238, -28.769255, transform=ax.get_transform('fk5'), s=300,
edgecolor='red', facecolor='none')
# World coordinates (should not be distorted)
r = SphericalCircle((266.4 * u.deg, -29.1 * u.deg), 0.15 * u.degree,
edgecolor='purple', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={'text.usetex': True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
|
96ac115746212aea77877f563fb206d476276159cf94023eaf6fae21b1a521d2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base CCDData class.
import textwrap
import numpy as np
import pytest
from ...io import fits
from ..nduncertainty import StdDevUncertainty, MissingDataAssociationException
from ... import units as u
from ... import log
from ...wcs import WCS, FITSFixedWarning
from ...tests.helper import catch_warnings
from ...utils import NumpyRNGContext
from ...utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
get_pkg_data_contents)
from ..ccddata import CCDData
# If additional pytest markers are defined, the key in the dictionary below
# should be the name of the marker.
DEFAULTS = {
'seed': 123,
'data_size': 100,
'data_scale': 1.0,
'data_mean': 0.0
}
DEFAULT_SEED = 123
DEFAULT_DATA_SIZE = 100
DEFAULT_DATA_SCALE = 1.0
def value_from_markers(key, request):
try:
val = request.keywords[key].args[0]
except KeyError:
val = DEFAULTS[key]
return val
@pytest.fixture
def ccd_data(request):
"""
Return a CCDData object with units of ADU.
The size of the data array is 100x100 but can be changed using the marker
@pytest.mark.data_size(N) on the test function, where N should be the
desired dimension.
Data values are initialized to random numbers drawn from a normal
distribution with mean of 0 and scale 1.
    The scale can be changed with the marker @pytest.mark.data_scale(s) on
    the test function, where s is the desired scale.
    The mean can be changed with the marker @pytest.mark.data_mean(m) on the
    test function, where m is the desired mean.
"""
size = value_from_markers('data_size', request)
scale = value_from_markers('data_scale', request)
mean = value_from_markers('data_mean', request)
with NumpyRNGContext(DEFAULTS['seed']):
data = np.random.normal(loc=mean, size=[size, size], scale=scale)
fake_meta = {'my_key': 42, 'your_key': 'not 42'}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
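# Illustrative usage sketch (added note, not part of the original suite): the
# fixture defaults above can be overridden per test with markers named after
# the DEFAULTS keys, e.g. a hypothetical test asking for a 10x10 array drawn
# with a larger scale:
#
#     @pytest.mark.data_size(10)
#     @pytest.mark.data_scale(5.0)
#     def test_hypothetical(ccd_data):
#         assert ccd_data.shape == (10, 10)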
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([100, 100]))
def test_ccddata_unit_cannot_be_set_to_none(ccd_data):
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError) as exc:
CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
assert "can't have both header and meta." in str(exc)
@pytest.mark.data_size(10)
def test_ccddata_simple(ccd_data):
assert ccd_data.shape == (10, 10)
assert ccd_data.size == 100
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros((10, 10)), unit="electron")
assert ccd.unit is u.electron
@pytest.mark.data_size(10)
def test_initialize_from_FITS(ccd_data, tmpdir):
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (10, 10)
assert cd.size == 100
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = u.adu.to_string()
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = 'ADU'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'definetely-not-a-unit'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
with catch_warnings(FITSFixedWarning) as w:
ccd = CCDData.read(filename, unit='adu')
assert len(w) == 0
    # check that the data in the image extension was read
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmpdir):
fake_img1 = np.random.random(size=(100, 100))
fake_img2 = np.random.random(size=(100, 100))
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1)
hdu2 = fits.ImageHDU(fake_img2)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit='adu')
    # check that the data of the requested extension (hdu=2) was read
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu(ccd_data, tmpdir):
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert 'bunit' in hdulist[0].header
assert hdulist[0].header['bunit'] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(ccd_data, tmpdir):
    # There are two fits.open keywords that are not permitted when reading
    # a CCDData: do_not_scale_image_data and scale_back
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit,
do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(ccd_data, tmpdir):
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_meta_is_case_sensitive(ccd_data):
key = 'SoMeKEY'
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header(ccd_data):
ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(ccd_data, tmpdir):
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
    # by default, we read from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader(ccd_data):
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromdict():
dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
def test_header2meta():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromstring_fail():
hdr = 'this is not a valid header'
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error(ccd_data):
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array(ccd_data):
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error(ccd_data):
with pytest.raises(ValueError):
ccd_data.uncertainty = np.random.random(size=(3, 4))
def test_to_hdu(ccd_data):
ccd_data.meta = {'observer': 'Edwin Hubble'}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_copy(ccd_data):
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize('operation,affects_uncertainty', [
("multiply", True),
("divide", True),
])
@pytest.mark.parametrize('operand', [
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
@pytest.mark.data_unit(u.adu)
def test_mult_div_overload(ccd_data, operand, with_uncertainty,
operation, affects_uncertainty):
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = ccd_data.__getattribute__(operation)
np_method = np.__getattribute__(operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize('operation,affects_uncertainty', [
("add", False),
("subtract", False),
])
@pytest.mark.parametrize('operand,expect_failure', [
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
@pytest.mark.data_unit(u.adu)
def test_add_sub_overload(ccd_data, operand, expect_failure, with_uncertainty,
operation, affects_uncertainty):
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = ccd_data.__getattribute__(operation)
np_method = np.__getattribute__(operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert (result.unit == ccd_data.unit and result.unit == operand.unit)
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails(ccd_data):
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit='')
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_diff_smaller_3(first, second):
return abs(first - second) <= 3
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5)
assert ccd1.add(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
assert ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
assert ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
assert ccd1.divide(ccd2, compare_wcs=return_diff_smaller_3).wcs == 2
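# Illustrative note (inferred from the surrounding tests, not from the
# original file): ``compare_wcs`` may be any callable that takes the two wcs
# attributes and returns a bool; when it returns True the result keeps the
# first operand's wcs, and when it returns False a ValueError is raised.
# A hypothetical comparison callable would look like:
#
#     def wcs_equal(wcs1, wcs2):
#         return wcs1 == wcs2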
def test_arithmetic_with_wcs_compare_fail():
def return_diff_smaller_1(first, second):
return abs(first - second) <= 1
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=2)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=5)
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_diff_smaller_1).wcs
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_diff_smaller_1).wcs
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_diff_smaller_1).wcs
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_diff_smaller_1).wcs
def test_arithmetic_overload_ccddata_operand(ccd_data):
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
2 * ccd_data.data)
np.testing.assert_array_equal(result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
0 * ccd_data.data)
np.testing.assert_array_equal(result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
ccd_data.data ** 2)
expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
np.ones_like(ccd_data.data))
expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
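# Worked note (added for clarity): with two independent operands sharing the
# same standard deviation sigma, addition and subtraction propagate as
# sqrt(sigma**2 + sigma**2) = sqrt(2) * sigma. For multiplication and
# division the relative uncertainties add in quadrature, giving absolute
# uncertainties of sqrt(2) * |data| * sigma and sqrt(2) / |data| * sigma,
# which are exactly the factors asserted above.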
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmpdir):
fake_img = np.random.random(size=(100, 100))
hdu = fits.PrimaryHDU(fake_img)
hdu.header['history'] = 'one'
hdu.header['history'] = 'two'
hdu.header['history'] = 'three'
assert len(hdu.header['history']) == 3
tmp_file = tmpdir.join('temp.fits').strpath
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header['history'] == hdu.header['history']
def test_info_logged_if_unit_in_fits_header(ccd_data, tmpdir):
tmpfile = tmpdir.join('temp.fits')
ccd_data.write(tmpfile.strpath)
log.setLevel('INFO')
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
ccd_from_disk = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(ccd_data, tmpdir):
"""
    Check that the WCS attribute gets added to the header, and that if a
    CCDData object is created from a FITS file with a header and the WCS
    attribute is modified, then when the CCDData object is turned back into
    an HDU, the WCS object overwrites the old WCS information in the header.
"""
tmpfile = tmpdir.join('temp.fits')
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile.strpath)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile.strpath)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ['', 'COMMENT', 'HISTORY']:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
    # Now check that if the WCS of a CCDData is modified and the CCDData is
    # converted to an HDU, the WCS keywords in the header are overwritten
    # with the appropriate keywords from the modified WCS.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename('data/sip-wcs.fits')
ccd = CCDData.read(data_file)
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits')
ccd = CCDData.read(data_file1, unit='count')
def test_wcs_keyword_removal_for_wcs_test_files():
"""
    Test, for the WCS test files, that keyword removal works as
    expected. These cover a much broader range of WCS types than
    test_wcs_keywords_removed_from_header.
"""
from ..ccddata import _generate_wcs_and_update_header
from ..ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
pattern='*.hdr')
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if 'invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr:
continue
header_string = get_pkg_data_contents(hdr)
wcs = WCS(header_string)
header = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
# Make sure all of the WCS-related keywords have been removed.
assert not (set(new_header) &
set(new_wcs.to_header(relax=True)) -
keepers)
# Check that the new wcs is the same as the old.
new_wcs_header = new_wcs.to_header(relax=True)
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header[k] == v
else:
np.testing.assert_almost_equal(header[k], v)
def test_read_wcs_not_creatable(tmpdir):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent('''
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
''')
with catch_warnings(FITSFixedWarning):
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n')
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = tmpdir.join('afile.fits').strpath
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit='adu')
assert ccd.wcs is None
def test_header(ccd_data):
a = {'Observer': 'Hubble'}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic(ccd_data):
ccd_data.wcs = 5
result = ccd_data.multiply(1.0)
assert result.wcs == 5
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_wcs_arithmetic_ccd(ccd_data, operation):
ccd_data2 = ccd_data.copy()
ccd_data.wcs = 5
method = ccd_data.__getattribute__(operation)
result = method(ccd_data2)
assert result.wcs == ccd_data.wcs
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename('data/sip-wcs.fits')
def check_wcs_ctypes(header):
expected_wcs_ctypes = {
'CTYPE1': 'RA---TAN-SIP',
'CTYPE2': 'DEC--TAN-SIP'
}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(ccd_data, operation):
ccd_data2 = ccd_data.copy()
ccd_data.mask = (ccd_data.data > 0)
method = ccd_data.__getattribute__(operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(ccd_data, tmpdir):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data.mask = ccd_data.data > 10
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
def test_write_read_multiextensionfits_uncertainty_default(ccd_data, tmpdir):
    # Test that if an uncertainty is present it is saved and loaded by default.
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_write_read_multiextensionfits_not(ccd_data, tmpdir):
# Test that writing mask and uncertainty can be disabled
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(ccd_data, tmpdir):
    # Test writing mask and uncertainty to extensions other than the defaults
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
    # Try reading with the default extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_wcs(ccd_data):
ccd_data.wcs = 5
assert ccd_data.wcs == 5
def test_recognized_fits_formats_for_read_write(ccd_data, tmpdir):
# These are the extensions that are supposed to be supported.
supported_extensions = ['fit', 'fits', 'fts']
for ext in supported_extensions:
path = tmpdir.join("test.{}".format(ext))
ccd_data.write(path.strpath)
from_disk = CCDData.read(path.strpath)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
    # This test might create a memory leak on purpose, so the last lines
    # after the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit='')
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
|
9585260b1877ec05268bb340c976d262e03ad70d6ae1b9e88b1c2f24f8dbfd83 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ...tests.helper import assert_quantity_allclose
from ..utils import (extract_array, add_array, subpixel_indices,
block_reduce, block_replicate,
overlap_slices, NoOverlapError, PartialOverlapError,
Cutout2D)
from ...wcs import WCS, Sip
from ...wcs.utils import proj_plane_pixel_area
from ...coordinates import SkyCoord
from ... import units as u
try:
import skimage # pylint: disable=W0611
HAS_SKIMAGE = True
except ImportError:
HAS_SKIMAGE = False
test_positions = [(10.52, 3.12), (5.62, 12.97), (31.33, 31.77),
(0.46, 0.94), (20.45, 12.12), (42.24, 24.42)]
test_position_indices = [(0, 3), (0, 2), (4, 1),
(4, 2), (4, 3), (3, 4)]
test_slices = [slice(10.52, 3.12), slice(5.62, 12.97),
slice(31.33, 31.77), slice(0.46, 0.94),
slice(20.45, 12.12), slice(42.24, 24.42)]
subsampling = 5
test_pos_bad = [(-1, -4), (-1, 0), (6, 2), (6, 6)]
def test_slices_different_dim():
    '''Overlap of arrays with different numbers of dimensions is undefined.'''
with pytest.raises(ValueError) as e:
overlap_slices((4, 5, 6), (1, 2), (0, 0))
assert "the same number of dimensions" in str(e.value)
def test_slices_pos_different_dim():
'''Position must have same dim as arrays.'''
with pytest.raises(ValueError) as e:
overlap_slices((4, 5), (1, 2), (0, 0, 3))
assert "the same number of dimensions" in str(e.value)
@pytest.mark.parametrize('pos', test_pos_bad)
def test_slices_no_overlap(pos):
'''If there is no overlap between arrays, an error should be raised.'''
with pytest.raises(NoOverlapError):
overlap_slices((5, 5), (2, 2), pos)
def test_slices_partial_overlap():
'''Compute a slice for partially overlapping arrays.'''
temp = overlap_slices((5,), (3,), (0,))
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
temp = overlap_slices((5,), (3,), (0,), mode='partial')
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
for pos in [0, 4]:
with pytest.raises(PartialOverlapError) as e:
temp = overlap_slices((5,), (3,), (pos,), mode='strict')
assert 'Arrays overlap only partially.' in str(e.value)
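# Illustrative note (semantics inferred from the asserts above): the pair
# returned by overlap_slices is ``(slices_large, slices_small)``, so that
#
#     slices_large, slices_small = overlap_slices((5,), (3,), (0,))
#     # large[slices_large] and small[slices_small] then address the same
#     # overlapping region.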
def test_slices_overlap_wrong_mode():
'''Call overlap_slices with non-existing mode.'''
with pytest.raises(ValueError) as e:
overlap_slices((5,), (3,), (0,), mode='full')
assert "Mode can be only" in str(e.value)
def test_extract_array_wrong_mode():
'''Call extract_array with non-existing mode.'''
with pytest.raises(ValueError) as e:
extract_array(np.arange(4), (2, ), (0, ), mode='full')
assert "Valid modes are 'partial', 'trim', and 'strict'." == str(e.value)
def test_extract_array_1d_even():
    '''Extract 1-d arrays.
All dimensions are treated the same, so we can test in 1 dim.
'''
assert np.all(extract_array(np.arange(4), (2, ), (0, ), fill_value=-99) == np.array([-99, 0]))
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2, ), (i, )) == np.array([i - 1, i]))
assert np.all(extract_array(np.arange(4.), (2, ), (4, ), fill_value=np.inf) == np.array([3, np.inf]))
def test_extract_array_1d_odd():
    '''Extract 1-d arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only.
'''
assert np.all(extract_array(np.arange(4), (3,), (-1, ), fill_value=-99) == np.array([-99, -99, 0]))
assert np.all(extract_array(np.arange(4), (3,), (0, ), fill_value=-99) == np.array([-99, 0, 1]))
for i in [1, 2]:
assert np.all(extract_array(np.arange(4), (3,), (i, )) == np.array([i-1, i, i+1]))
assert np.all(extract_array(np.arange(4), (3,), (3, ), fill_value=-99) == np.array([2, 3, -99]))
arrayin = np.arange(4.)
extracted = extract_array(arrayin, (3,), (4, ))
assert extracted[0] == 3
assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan
assert extracted.dtype == arrayin.dtype
def test_extract_array_1d():
"""In 1d, shape can be int instead of tuple"""
assert np.all(extract_array(np.arange(4), 3, (-1, ), fill_value=-99) == np.array([-99, -99, 0]))
assert np.all(extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0]))
def test_extract_Array_float():
"""integer is at bin center"""
for a in np.arange(2.51, 3.49, 0.1):
assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4]))
def test_extract_array_1d_trim():
    '''Extract 1-d arrays.
All dimensions are treated the same, so we can test in 1 dim.
'''
assert np.all(extract_array(np.arange(4), (2, ), (0, ), mode='trim') == np.array([0]))
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2, ), (i, ), mode='trim') == np.array([i - 1, i]))
assert np.all(extract_array(np.arange(4.), (2, ), (4, ), mode='trim') == np.array([3]))
@pytest.mark.parametrize('mode', ['partial', 'trim', 'strict'])
def test_extract_array_easy(mode):
"""
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array[3:8, 3:8] = small_test_array
extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)
assert np.all(extracted_array == small_test_array)
def test_extract_array_return_pos():
'''Check that the return position is calculated correctly.
    The result will differ by mode. All tests here are done in 1d because it's
easier to construct correct test cases.
'''
large_test_array = np.arange(5)
for i in np.arange(-1, 6):
extracted, new_pos = extract_array(large_test_array, 3, i,
mode='partial', return_position=True)
assert new_pos == (1, )
    # Now check extraction with an even shape
for i, expected in zip([1.49, 1.51, 3], [1.49, 0.51, 1]):
extracted, new_pos = extract_array(large_test_array, (2,), (i,),
mode='strict', return_position=True)
assert new_pos == (expected, )
    # For mode='trim' the answer actually depends on the position
for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):
extracted, new_pos = extract_array(large_test_array, (3,), (i,),
mode='trim', return_position=True)
assert new_pos == (expected, )
def test_add_array_odd_shape():
"""
Test add_array utility function.
    Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[3:8, 3:8] += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
def test_add_array_even_shape():
"""
    Test add_array utility function with an even-shaped array.
    Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((4, 4))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]
added_array = add_array(large_test_array, small_test_array, (0, 0))
assert np.all(added_array == large_test_array_ref)
@pytest.mark.parametrize(('position', 'subpixel_index'),
zip(test_positions, test_position_indices))
def test_subpixel_indices(position, subpixel_index):
"""
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values.
"""
assert np.all(subpixel_indices(position, subsampling) == subpixel_index)
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestBlockReduce:
def test_1d(self):
"""Test 1D array."""
data = np.arange(4)
expected = np.array([1, 5])
result = block_reduce(data, 2)
assert np.all(result == expected)
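    # Note (added for clarity): block_reduce sums within each block by
    # default, so [0, 1, 2, 3] with block_size 2 -> [0 + 1, 2 + 3] == [1, 5].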
def test_1d_mean(self):
"""Test 1D array with func=np.mean."""
data = np.arange(4)
block_size = 2.
expected = block_reduce(data, block_size, func=np.sum) / block_size
result_mean = block_reduce(data, block_size, func=np.mean)
assert np.all(result_mean == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(4).reshape(2, 2)
expected = np.array([[6]])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_2d_mean(self):
"""Test 2D array with func=np.mean."""
data = np.arange(4).reshape(2, 2)
block_size = 2.
expected = (block_reduce(data, block_size, func=np.sum) /
block_size**2)
result = block_reduce(data, block_size, func=np.mean)
assert np.all(result == expected)
def test_2d_trim(self):
"""
Test trimming of 2D array when size is not perfectly divisible
by block_size.
"""
data1 = np.arange(15).reshape(5, 3)
result1 = block_reduce(data1, 2)
data2 = data1[0:4, 0:2]
result2 = block_reduce(data2, 2)
assert np.all(result1 == result2)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(16).reshape(4, 4)
result1 = block_reduce(data, 2)
result2 = block_reduce(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.ones((2, 2))
with pytest.raises(ValueError):
block_reduce(data, (2, 2, 2))
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestBlockReplicate:
def test_1d(self):
"""Test 1D array."""
data = np.arange(2)
expected = np.array([0, 0, 0.5, 0.5])
result = block_replicate(data, 2)
assert np.all(result == expected)
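    # Note (added for clarity): with the default conserve_sum=True each
    # replicated value is divided by the block size, so [0, 1] becomes
    # [0, 0, 0.5, 0.5] and the total sum of the array is preserved.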
def test_1d_conserve_sum(self):
"""Test 1D array with conserve_sum=False."""
data = np.arange(2)
block_size = 2.
expected = block_replicate(data, block_size) * block_size
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(2).reshape(2, 1)
expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])
result = block_replicate(data, 2)
assert np.all(result == expected)
def test_2d_conserve_sum(self):
"""Test 2D array with conserve_sum=False."""
data = np.arange(6).reshape(2, 3)
block_size = 2.
expected = block_replicate(data, block_size) * block_size**2
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(4).reshape(2, 2)
result1 = block_replicate(data, 2)
result2 = block_replicate(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.arange(5)
with pytest.raises(ValueError):
block_replicate(data, (2, 2))
class TestCutout2D:
def setup_class(self):
self.data = np.arange(20.).reshape(5, 4)
self.position = SkyCoord('13h11m29.96s -01d19m18.7s', frame='icrs')
wcs = WCS(naxis=2)
rho = np.pi / 3.
scale = 0.05 / 3600.
wcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)],
[scale*np.sin(rho), scale*np.cos(rho)]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [self.position.ra.to_value(u.deg),
self.position.dec.to_value(u.deg)]
wcs.wcs.crpix = [3, 3]
self.wcs = wcs
# add SIP
sipwcs = wcs.deepcopy()
sipwcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
             [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
sipwcs.wcs.set()
self.sipwcs = sipwcs
def test_cutout(self):
sizes = [3, 3*u.pixel, (3, 3), (3*u.pixel, 3*u.pix), (3., 3*u.pixel),
(2.9, 3.3)]
for size in sizes:
position = (2.1, 1.9)
c = Cutout2D(self.data, position, size)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 10
assert c.origin_original == (1, 1)
assert c.origin_cutout == (0, 0)
assert c.input_position_original == position
assert_allclose(c.input_position_cutout, (1.1, 0.9))
assert c.position_original == (2., 2.)
assert c.position_cutout == (1., 1.)
assert c.center_original == (2., 2.)
assert c.center_cutout == (1., 1.)
assert c.bbox_original == ((1, 3), (1, 3))
assert c.bbox_cutout == ((0, 2), (0, 2))
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_length(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (1, 1, 1))
def test_size_units(self):
for size in [3 * u.cm, (3, 3 * u.K)]:
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), size)
def test_size_pixel(self):
"""
Check size in derived pixel units.
"""
size = 0.3*u.arcsec / (0.1*u.arcsec/u.pixel)
c = Cutout2D(self.data, (2, 2), size)
assert c.data.shape == (3, 3)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_angle(self):
c = Cutout2D(self.data, (2, 2), (0.1*u.arcsec), wcs=self.wcs)
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 3), slice(1, 3))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_size_angle_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))
def test_cutout_trim_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode='trim')
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_cutout_partial_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial')
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(1, 3), slice(1, 3))
def test_cutout_partial_overlap_fill_value(self):
fill_value = -99
c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial',
fill_value=fill_value)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.data[0, 0] == fill_value
def test_copy(self):
data = np.copy(self.data)
c = Cutout2D(data, (2, 3), (3, 3))
xy = (0, 0)
value = 100.
c.data[xy] = value
xy_orig = c.to_original_position(xy)
yx = xy_orig[::-1]
assert data[yx] == value
data = np.copy(self.data)
c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)
c2.data[xy] = value
assert data[yx] != value
def test_to_from_large(self):
position = (2, 2)
c = Cutout2D(self.data, position, (3, 3))
xy = (0, 0)
result = c.to_cutout_position(c.to_original_position(xy))
assert_allclose(result, xy)
def test_skycoord_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, self.position, (3, 3))
def test_skycoord(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)
skycoord_original = self.position.from_pixel(c.center_original[1],
c.center_original[0],
self.wcs)
skycoord_cutout = self.position.from_pixel(c.center_cutout[1],
c.center_cutout[0], c.wcs)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_skycoord_partial(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs,
mode='partial')
skycoord_original = self.position.from_pixel(c.center_original[1],
c.center_original[0],
self.wcs)
skycoord_cutout = self.position.from_pixel(c.center_cutout[1],
c.center_cutout[0], c.wcs)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_naxis_update(self):
xsize = 2
ysize = 3
c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)
assert c.wcs._naxis[0] == xsize
assert c.wcs._naxis[1] == ysize
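        # Note (illustrative reading of the Cutout2D conventions): the
        # ``position`` argument is given in (x, y) pixel order while
        # ``size`` follows the numpy (ny, nx) axis order, which is why
        # xsize above is checked against _naxis[0].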
def test_crpix_maps_to_crval(self):
w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs,
mode='partial').wcs
pscale = np.sqrt(proj_plane_pixel_area(w))
assert_allclose(
w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
assert_allclose(
w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
|
3b53f407c153880fdc620053797306c79c046cf586057705713d30d629c8fd0b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import pytest
import numpy as np
from ...tests.helper import catch_warnings
from ...utils.exceptions import AstropyUserWarning
from ... import units as u
from ..nddata import NDData
from ..decorators import support_nddata
class CCDData(NDData):
pass
@support_nddata
def wrapped_function_1(data, wcs=None, unit=None):
return data, wcs, unit
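# A rough sketch (not the actual decorator internals) of the unpacking
# behavior the tests below exercise: when the first argument is an NDData
# instance, its attributes are expanded into the matching keyword arguments
# of the wrapped function. The helper name and the attribute subset here
# are illustrative only.
def _unpack_nddata_sketch(func, data, **kwargs):
    if isinstance(data, NDData):
        for attr in ('wcs', 'unit'):  # illustrative subset of attributes
            value = getattr(data, attr, None)
            if value is not None and attr not in kwargs:
                kwargs[attr] = value
        data = data.data
    return func(data, **kwargs)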
def test_pass_numpy():
data_in = np.array([1, 2, 3])
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in)
assert data_out is data_in
assert wcs_out is None
assert unit_out is None
def test_pass_all_separate():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in, wcs=wcs_in, unit=unit_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata_and_explicit():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
unit_in_alt = u.mJy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
with catch_warnings() as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in_alt
assert len(w) == 1
assert str(w[0].message) == ("Property unit has been passed explicitly and as "
"an NDData property, using explicitly specified value")
def test_pass_nddata_ignored():
data_in = np.array([1, 2, 3])
wcs_in = "the wcs"
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0])
with catch_warnings() as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
assert len(w) == 1
assert str(w[0].message) == ("The following attributes were set on the data "
"object, but will be ignored by the function: mask")
def test_incorrect_first_argument():
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_2(something, wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_3(something, data, wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_4(wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
def test_wrap_function_no_kwargs():
@support_nddata
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in
def test_wrap_function_repack_valid():
@support_nddata(repack=True, returns=['data'])
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
nddata_out = wrapped_function_5(nddata_in, [1, 2, 3])
assert isinstance(nddata_out, NDData)
assert nddata_out.data is data_in
def test_wrap_function_accepts():
class MyData(NDData):
pass
@support_nddata(accepts=MyData)
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
mydata_in = MyData(data_in)
assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in
with pytest.raises(TypeError) as exc:
wrapped_function_5(nddata_in, [1, 2, 3])
assert exc.value.args[0] == "Only NDData sub-classes that inherit from MyData can be used by this function"
def test_wrap_preserve_signature_docstring():
@support_nddata
def wrapped_function_6(data, wcs=None, unit=None):
"""
An awesome function
"""
pass
if wrapped_function_6.__doc__ is not None:
assert wrapped_function_6.__doc__.strip() == "An awesome function"
signature = inspect.signature(wrapped_function_6)
assert str(signature) == "(data, wcs=None, unit=None)"
def test_setup_failures1():
# repack but no returns
with pytest.raises(ValueError):
support_nddata(repack=True)
def test_setup_failures2():
# returns but no repack
with pytest.raises(ValueError):
support_nddata(returns=['data'])
def test_setup_failures9():
# keeps but no repack
with pytest.raises(ValueError):
support_nddata(keeps=['unit'])
def test_setup_failures3():
# same attribute in keeps and returns
with pytest.raises(ValueError):
support_nddata(repack=True, keeps=['mask'], returns=['data', 'mask'])
def test_setup_failures4():
# function accepts *args
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures10():
# function accepts **kwargs
with pytest.raises(ValueError):
@support_nddata
def test(data, **kwargs):
pass
def test_setup_failures5():
# function accepts *args (or **kwargs)
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures6():
# First argument is not data
with pytest.raises(ValueError):
@support_nddata
def test(img):
pass
def test_setup_failures7():
# accepts CCDData but was given just an NDData
with pytest.raises(TypeError):
@support_nddata(accepts=CCDData)
def test(data):
pass
test(NDData(np.ones((3, 3))))
def test_setup_failures8():
    # function returns a different number of values than specified. Using
    # NDData here so we don't run into trouble when creating a CCDData
    # without a unit!
with pytest.raises(ValueError):
@support_nddata(repack=True, returns=['data', 'mask'])
def test(data):
return 10
test(NDData(np.ones((3, 3)))) # do NOT use CCDData here.
def test_setup_failures11():
# function accepts no arguments
with pytest.raises(ValueError):
@support_nddata
def test():
pass
def test_setup_numpyarray_default():
# It should be possible (even if it's not advisable to use mutable
# defaults) to have a numpy array as default value.
@support_nddata
def func(data, wcs=np.array([1, 2, 3])):
return wcs
def test_still_accepts_other_input():
@support_nddata(repack=True, returns=['data'])
def test(data):
return data
assert isinstance(test(NDData(np.ones((3, 3)))), NDData)
assert isinstance(test(10), int)
assert isinstance(test([1, 2, 3]), list)
def test_accepting_property_normal():
# Accepts a mask attribute and takes it from the input
@support_nddata
def test(data, mask=None):
return mask
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with catch_warnings(AstropyUserWarning) as w:
assert test(ndd, mask=10) == 10
assert len(w) == 1
def test_parameter_default_identical_to_explicit_passed_argument():
# If the default is identical to the explicitly passed argument this
# should still raise a Warning and use the explicit one.
@support_nddata
def func(data, wcs=[1, 2, 3]):
return wcs
with catch_warnings(AstropyUserWarning) as w:
assert func(NDData(1, wcs=[1, 2]), [1, 2, 3]) == [1, 2, 3]
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert func(NDData(1, wcs=[1, 2])) == [1, 2]
assert len(w) == 0
def test_accepting_property_notexist():
# Accepts flags attribute but NDData doesn't have one
@support_nddata
def test(data, flags=10):
return flags
ndd = NDData(np.ones((3, 3)))
test(ndd)
def test_accepting_property_translated():
    # Accepts a ``masked`` argument; the decorator translates the NDData
    # ``mask`` attribute into it via ``mask='masked'``.
@support_nddata(mask='masked')
def test(data, masked=None):
return masked
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with catch_warnings(AstropyUserWarning) as w:
assert test(ndd, masked=10) == 10
assert len(w) == 1
def test_accepting_property_meta_empty():
    # Meta is always set (an empty OrderedDict by default), so it is
    # special-cased: an empty meta is ignored even though it is not None.
@support_nddata
def test(data, meta=None):
return meta
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._meta = {'a': 10}
assert test(ndd) == {'a': 10}
|
640d05c1cbeddfa74509ab2e8571c69cac9b04e0ee69305c195dcc815e711d75 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from .. import core as erfa
from ...tests.helper import catch_warnings
def test_erfa_wrapper():
"""
Runs a set of tests that mostly make sure vectorization is
working as expected
"""
jd = np.linspace(2456855.5, 2456855.5+1.0/24.0/60.0, 60*2+1)
ra = np.linspace(0.0, np.pi*2.0, 5)
dec = np.linspace(-np.pi/2.0, np.pi/2.0, 4)
aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd, 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
assert aob.shape == (121,)
aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd[0], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
assert aob.shape == ()
aob, zob, hob, dob, rob, eo = erfa.atco13(ra[:, None, None], dec[None, :, None], 0.0, 0.0, 0.0, 0.0, jd[None, None, :], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
    assert aob.shape == (5, 4, 121)
iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd, 0.0)
assert iy.shape == (121,)
assert ihmsf.shape == (121,)
assert ihmsf.dtype == erfa.dt_hmsf
iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd[0], 0.0)
assert iy.shape == ()
assert ihmsf.shape == ()
assert ihmsf.dtype == erfa.dt_hmsf
def test_angle_ops():
sign, idmsf = erfa.a2af(6, -np.pi)
assert sign == b'-'
assert idmsf.item() == (180, 0, 0, 0)
sign, ihmsf = erfa.a2tf(6, np.pi)
assert sign == b'+'
assert ihmsf.item() == (12, 0, 0, 0)
rad = erfa.af2a('-', 180, 0, 0.0)
np.testing.assert_allclose(rad, -np.pi)
rad = erfa.tf2a('+', 12, 0, 0.0)
np.testing.assert_allclose(rad, np.pi)
rad = erfa.anp(3.*np.pi)
np.testing.assert_allclose(rad, np.pi)
rad = erfa.anpm(3.*np.pi)
np.testing.assert_allclose(rad, -np.pi)
sign, ihmsf = erfa.d2tf(1, -1.5)
assert sign == b'-'
assert ihmsf.item() == (36, 0, 0, 0)
days = erfa.tf2d('+', 3, 0, 0.0)
np.testing.assert_allclose(days, 0.125)
def test_spherical_cartesian():
theta, phi = erfa.c2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
np.testing.assert_allclose(r, 2.0)
pv = np.array(([0.0, np.sqrt(2.0), np.sqrt(2.0)], [1.0, 0.0, 0.0]),
dtype=erfa.dt_pv)
theta, phi, r, td, pd, rd = erfa.pv2s(pv)
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
np.testing.assert_allclose(r, 2.0)
np.testing.assert_allclose(td, -np.sqrt(2.0)/2.0)
np.testing.assert_allclose(pd, 0.0)
np.testing.assert_allclose(rd, 0.0)
c = erfa.s2c(np.pi/2.0, np.pi/4.0)
np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14)
c = erfa.s2p(np.pi/2.0, np.pi/4.0, 1.0)
np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14)
pv = erfa.s2pv(np.pi/2.0, np.pi/4.0, 2.0, np.sqrt(2.0)/2.0, 0.0, 0.0)
np.testing.assert_allclose(pv['p'], [0.0, np.sqrt(2.0), np.sqrt(2.0)], atol=1e-14)
np.testing.assert_allclose(pv['v'], [-1.0, 0.0, 0.0], atol=1e-14)
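    # Reference relations (illustrative, not quoted from the ERFA sources)
    # behind the checks above, with theta the longitude and phi the latitude:
    #     x = r*cos(phi)*cos(theta)
    #     y = r*cos(phi)*sin(theta)
    #     z = r*sin(phi)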
def test_errwarn_reporting():
"""
Test that the ERFA error reporting mechanism works as it should
"""
# no warning
erfa.dat(1990, 1, 1, 0.5)
# check warning is raised for a scalar
with catch_warnings() as w:
erfa.dat(100, 1, 1, 0.5)
assert len(w) == 1
assert w[0].category == erfa.ErfaWarning
assert '1 of "dubious year (Note 1)"' in str(w[0].message)
# and that the count is right for a vector.
with catch_warnings() as w:
erfa.dat([100, 200, 1990], 1, 1, 0.5)
assert len(w) == 1
assert w[0].category == erfa.ErfaWarning
assert '2 of "dubious year (Note 1)"' in str(w[0].message)
try:
erfa.dat(1990, [1, 34, 2], [1, 1, 43], 0.5)
except erfa.ErfaError as e:
if '1 of "bad day (Note 3)", 1 of "bad month"' not in e.args[0]:
assert False, 'Raised the correct type of error, but wrong message: ' + e.args[0]
try:
erfa.dat(200, [1, 34, 2], [1, 1, 43], 0.5)
except erfa.ErfaError as e:
if 'warning' in e.args[0]:
assert False, 'Raised the correct type of error, but there were warnings mixed in: ' + e.args[0]
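    # As far as these tests show, non-zero ERFA status codes surface as
    # ErfaWarning for warning-level statuses and ErfaError for error-level
    # ones, with per-element counts aggregated into a single message for
    # vector inputs.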
def test_vector_inouts():
"""
    Tests that vector arguments to ERFA functions are correctly consumed and returned
"""
# values are from test_erfa.c t_ab function
pnat = [-0.76321968546737951,
-0.60869453983060384,
-0.21676408580639883]
v = [2.1044018893653786e-5,
-8.9108923304429319e-5,
-3.8633714797716569e-5]
s = 0.99980921395708788
bm1 = 0.99999999506209258
expected = [-0.7631631094219556269,
-0.6087553082505590832,
-0.2167926269368471279]
res = erfa.ab(pnat, v, s, bm1)
assert res.shape == (3,)
np.testing.assert_allclose(res, expected)
res2 = erfa.ab([pnat]*4, v, s, bm1)
assert res2.shape == (4, 3)
np.testing.assert_allclose(res2, [expected]*4)
# here we stride an array and also do it Fortran-order to make sure
# it all still works correctly with non-contig arrays
pnata = np.array(pnat)
arrin = np.array([pnata, pnata/2, pnata/3, pnata/4, pnata/5]*4, order='F')
res3 = erfa.ab(arrin[::5], v, s, bm1)
assert res3.shape == (4, 3)
np.testing.assert_allclose(res3, [expected]*4)
def test_pv_in():
jd1 = 2456165.5
jd2 = 0.401182685
pv = np.empty((), dtype=erfa.dt_pv)
pv['p'] = [-6241497.16,
401346.896,
-1251136.04]
pv['v'] = [-29.264597,
-455.021831,
0.0266151194]
astrom = erfa.apcs13(jd1, jd2, pv)
assert astrom.shape == ()
# values from t_erfa_c
np.testing.assert_allclose(astrom['pmt'], 12.65133794027378508)
np.testing.assert_allclose(astrom['em'], 1.010428384373318379)
np.testing.assert_allclose(astrom['eb'], [0.9012691529023298391,
-.4173999812023068781,
-.1809906511146821008])
np.testing.assert_allclose(astrom['bpn'], np.eye(3))
# first make sure it *fails* if we mess with the input orders
pvbad = np.empty_like(pv)
pvbad['p'], pvbad['v'] = pv['v'], pv['p']
astrombad = erfa.apcs13(jd1, jd2, pvbad)
assert not np.allclose(astrombad['em'], 1.010428384373318379)
pvarr = np.array([pv]*3)
astrom2 = erfa.apcs13(jd1, jd2, pvarr)
assert astrom2.shape == (3,)
np.testing.assert_allclose(astrom2['em'], 1.010428384373318379)
# try striding of the input array to make non-contiguous
pvmatarr = np.array([pv]*9)[::3]
astrom3 = erfa.apcs13(jd1, jd2, pvmatarr)
assert astrom3.shape == (3,)
np.testing.assert_allclose(astrom3['em'], 1.010428384373318379)
def test_structs():
"""
Checks producing and consuming of ERFA c structs
"""
am, eo = erfa.apci13(2456165.5, [0.401182685, 1])
assert am.shape == (2, )
assert am.dtype == erfa.dt_eraASTROM
assert eo.shape == (2, )
# a few spotchecks from test_erfa.c
np.testing.assert_allclose(am[0]['pmt'], 12.65133794027378508)
np.testing.assert_allclose(am[0]['v'], [0.4289638897157027528e-4,
0.8115034002544663526e-4,
0.3517555122593144633e-4])
ri, di = erfa.atciqz(2.71, 0.174, am[0])
np.testing.assert_allclose(ri, 2.709994899247599271)
np.testing.assert_allclose(di, 0.1728740720983623469)
|
f39e6e66fa54928c698a27994eb6b798de86f0ac2129a42ff875fb36e6ae5853 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from ...utils.data import get_pkg_data_contents, get_pkg_data_filename
from ...time import Time
from ... import units as u
from ..wcs import WCS, Sip, WCSSUB_LONGITUDE, WCSSUB_LATITUDE
from ..utils import (proj_plane_pixel_scales, proj_plane_pixel_area,
is_proj_plane_distorted,
non_celestial_pixel_scales, wcs_to_celestial_frame,
celestial_frame_to_wcs, skycoord_to_pixel,
pixel_to_skycoord, custom_wcs_to_frame_mappings,
custom_frame_to_wcs_mappings, add_stokes_axis_to_wcs)
def test_wcs_dropping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
def test_wcs_swapping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
@pytest.mark.parametrize('ndim', (2, 3))
def test_add_stokes(ndim):
wcs = WCS(naxis=ndim)
for ii in range(ndim + 1):
outwcs = add_stokes_axis_to_wcs(wcs, ii)
assert outwcs.wcs.naxis == ndim + 1
assert outwcs.wcs.ctype[ii] == 'STOKES'
assert outwcs.wcs.cname[ii] == 'STOKES'
def test_slice():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))
assert slice_wcs._naxis == [1000, 499]
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.wcs_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
assert slice_wcs._naxis == [250, 250]
slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))
assert slice_wcs._naxis == [500, 250]
# Non-integral values do not alter the naxis attribute
slice_wcs = mywcs.slice([slice(50.), slice(20.)])
assert slice_wcs._naxis == [1000, 500]
slice_wcs = mywcs.slice([slice(50.), slice(20)])
assert slice_wcs._naxis == [20, 500]
slice_wcs = mywcs.slice([slice(50), slice(20.5)])
assert slice_wcs._naxis == [1000, 50]
def test_slice_with_sip():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
mywcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[ -2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
mywcs.sip = Sip(a, b, None, None, mywcs.wcs.crpix)
mywcs.wcs.set()
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_slice_getitem():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
mywcs.wcs.crpix = [2, 2]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
# Default: numpy order
slice_wcs = mywcs[1::2]
assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))
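    # The CRPIX values checked above are consistent with the relation
    #     crpix_new = (crpix - start - 0.5) / step + 0.5
    # for a slice with offset ``start`` and stride ``step`` along an axis;
    # e.g. crpix=2, start=1, step=2 gives (2 - 1 - 0.5)/2 + 0.5 = 0.75.
    # (Illustrative reading of these tests, not a quote from the WCS docs.)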
def test_slice_fitsorder():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))
slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))
def test_invalid_slice():
mywcs = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
mywcs[0]
assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
with pytest.raises(ValueError) as exc:
mywcs[0, ::2]
assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
def test_axis_names():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
def test_celestial():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES']
cel = mywcs.celestial
assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert cel.axis_type_names == ['RA', 'DEC']
def test_wcs_to_celestial_frame():
# Import astropy.coordinates here to avoid circular imports
from ...coordinates.builtin_frames import ICRS, ITRS, FK5, FK4, Galactic
mywcs = WCS(naxis=2)
mywcs.wcs.set()
with pytest.raises(ValueError) as exc:
assert wcs_to_celestial_frame(mywcs) is None
assert exc.value.args[0] == "Could not determine celestial frame corresponding to the specified WCS object"
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
mywcs.wcs.set()
with pytest.raises(ValueError):
assert wcs_to_celestial_frame(mywcs) is None
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.equinox = 1987.
mywcs.wcs.set()
print(mywcs.to_header())
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK5)
assert frame.equinox == Time(1987., format='jyear')
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.equinox = 1982
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK4)
assert frame.equinox == Time(1982., format='byear')
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['TLON-CAR', 'TLAT-CAR']
mywcs.wcs.dateobs = '2017-08-17T12:41:04.430'
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ITRS)
assert frame.obstime == Time('2017-08-17T12:41:04.430')
for equinox in [np.nan, 1987, 1982]:
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.radesys = 'ICRS'
mywcs.wcs.equinox = equinox
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# Flipped order
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# More than two dimensions
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['GLAT-CAR', 'VELOCITY', 'GLON-CAR']
mywcs.wcs.set()
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
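    # Frame resolution as sketched by the cases above: an explicit RADESYS
    # wins; otherwise an equinox of 1984.0 or later implies FK5, an earlier
    # (Besselian) equinox implies FK4, and no equinox at all defaults to
    # ICRS.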
def test_wcs_to_celestial_frame_extend():
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
mywcs.wcs.set()
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
class OffsetFrame:
pass
def identify_offset(wcs):
if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'):
return OffsetFrame()
with custom_wcs_to_frame_mappings(identify_offset):
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, OffsetFrame)
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
def test_celestial_frame_to_wcs():
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import ICRS, ITRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame
class FakeFrame(BaseCoordinateFrame):
pass
frame = FakeFrame()
with pytest.raises(ValueError) as exc:
celestial_frame_to_wcs(frame)
assert exc.value.args[0] == ("Could not determine WCS corresponding to "
"the specified coordinate frame.")
frame = ICRS()
mywcs = celestial_frame_to_wcs(frame)
mywcs.wcs.set()
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'ICRS'
assert np.isnan(mywcs.wcs.equinox)
assert mywcs.wcs.lonpole == 180
assert mywcs.wcs.latpole == 0
frame = FK5(equinox='J1987')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK5'
assert mywcs.wcs.equinox == 1987.
frame = FK4(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4'
assert mywcs.wcs.equinox == 1982.
frame = FK4NoETerms(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4-NO-E'
assert mywcs.wcs.equinox == 1982.
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
mywcs.wcs.crval = [100, -30]
mywcs.wcs.set()
assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))
frame = ITRS(obstime=Time('2017-08-17T12:41:04.43'))
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == '2017-08-17T12:41:04.430'
frame = ITRS()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == Time('J2000').utc.isot
def test_celestial_frame_to_wcs_extend():
class OffsetFrame:
pass
frame = OffsetFrame()
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def identify_offset(frame, projection=None):
if isinstance(frame, OffsetFrame):
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
return wcs
with custom_frame_to_wcs_mappings(identify_offset):
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET')
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def test_pixscale_nodrop():
mywcs = WCS(naxis=2)
mywcs.wcs.cdelt = [0.1, 0.2]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2]
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
def test_pixscale_withdrop():
mywcs = WCS(naxis=3)
mywcs.wcs.cdelt = [0.1, 0.2, 1]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT']
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2, 1]
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
def test_pixscale_cd():
mywcs = WCS(naxis=2)
mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_cd_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_pc_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cdelt = [-scale, scale]
mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)],
[np.sin(rho), np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize(('cdelt', 'pc', 'pccd'),
(([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),
([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),
([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3]))))
def test_pixel_scale_matrix(cdelt, pc, pccd):
mywcs = WCS(naxis=(len(cdelt)))
mywcs.wcs.cdelt = cdelt
mywcs.wcs.pc = pc
assert_almost_equal(mywcs.pixel_scale_matrix, pccd)
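    # Relations consistent with the parametrized cases above (illustrative):
    # pixel_scale_matrix == diag(cdelt) @ pc, and proj_plane_pixel_scales
    # takes the column norms of that matrix, which is why the rotated-CD
    # and rotated-PC tests above recover (0.1, 0.1) for every angle.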
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], False),
(['RA---TAN', 'FREQ'], False),))
def test_is_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.is_celestial == cel
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], True),
(['RA---TAN', 'FREQ'], False),))
def test_has_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.has_celestial == cel
@pytest.mark.parametrize(('cdelt', 'pc', 'cd'),
((np.array([0.1, 0.2]), np.eye(2), np.eye(2)),
(np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2)),
(np.array([0.1, 0.2]), np.eye(2), None),
(np.array([0.1, 0.2]), None, np.eye(2)),
))
def test_noncelestial_scale(cdelt, pc, cd):
mywcs = WCS(naxis=2)
if cd is not None:
mywcs.wcs.cd = cd
if pc is not None:
mywcs.wcs.pc = pc
mywcs.wcs.cdelt = cdelt
mywcs.wcs.ctype = ['RA---TAN', 'FREQ']
ps = non_celestial_pixel_scales(mywcs)
assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel(mode):
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_contents('maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# Make sure you can specify a different class using ``cls`` keyword
class SkyCoord2(SkyCoord):
pass
new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode,
cls=SkyCoord2).transform_to('icrs')
assert new2.__class__ is SkyCoord2
assert_allclose(new2.ra.degree, ref.ra.degree)
assert_allclose(new2.dec.degree, ref.dec.degree)
def test_skycoord_to_pixel_swapped():
# Regression test for a bug that caused skycoord_to_pixel and
# pixel_to_skycoord to not work correctly if the axes were swapped in the
# WCS.
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_contents('maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
wcs_swapped = wcs.sub([WCSSUB_LATITUDE, WCSSUB_LONGITUDE])
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp1, yp1 = skycoord_to_pixel(ref, wcs)
xp2, yp2 = skycoord_to_pixel(ref, wcs_swapped)
assert_allclose(xp1, xp2)
assert_allclose(yp1, yp2)
# WCS is in FK5 so we need to transform back to ICRS
new1 = pixel_to_skycoord(xp1, yp1, wcs).transform_to('icrs')
new2 = pixel_to_skycoord(xp1, yp1, wcs_swapped).transform_to('icrs')
assert_allclose(new1.ra.degree, new2.ra.degree)
assert_allclose(new1.dec.degree, new2.dec.degree)
def test_is_proj_plane_distorted():
# non-orthogonal CD:
wcs = WCS(naxis=2)
wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert(is_proj_plane_distorted(wcs))
# almost orthogonal CD:
wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]
assert(not is_proj_plane_distorted(wcs))
# real case:
header = get_pkg_data_filename('data/sip.fits')
wcs = WCS(header)
assert(is_proj_plane_distorted(wcs))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel_distortions(mode):
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_filename('data/sip.fits')
wcs = WCS(header)
ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
|
a770919d6c73e90178c9b788b0ea2056d6085f11a57808f34743dded04dbb3a2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import warnings
from datetime import datetime
import pytest
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal)
from ...tests.helper import raises, catch_warnings
from ... import wcs
from .. import _wcs
from ...utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from ...utils.misc import NumpyRNGContext
from ...io import fits
class TestMaps:
def setup(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames("maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
        assert len(self._file_list) == n_data_files, (
            "test_maps has wrong number of data files: found {}, expected "
            "{}".format(len(self._file_list), n_data_files))
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("maps", filename), encoding='binary')
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup(self):
self._file_list = list(get_pkg_data_filenames("spectra",
pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
        assert len(self._file_list) == n_data_files, (
            "test_spectra has wrong number of data files: found {}, expected "
            "{}".format(len(self._file_list), n_data_files))
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("spectra", filename), encoding='binary')
# finally run the test.
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
def run():
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
try:
w = wcs.WCS(header, translate_units='dhs')
except wcs.InvalidTransformError:
pass
else:
assert False, "Expected InvalidTransformError"
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 2
for item in w:
if 'unitfix' in str(item.message):
assert 'Hz' in str(item.message)
assert 'M/S' in str(item.message)
assert 'm/s' in str(item.message)
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents(
'data/outside_sky.hdr', encoding='binary')
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
# TODO: write this to test the expected output behavior of pix2world,
# currently this just makes sure it doesn't error out in unexpected ways
filename = get_pkg_data_filename('data/sip2.fits')
with catch_warnings(wcs.wcs.FITSFixedWarning) as caught_warnings:
        # this raises a warning that is unimportant for testing pix2world:
# FITSFixedWarning(u'The WCS transformation has more axes (2) than the
# image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
close_enough = 1e-8
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018],
[0.00023043, -0.00024997]])
assert np.all(np.abs(ww.wcs.pc - answer) < close_enough)
answer = np.array([[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272]])
assert np.all(np.abs(result - answer) < close_enough)
def test_load_fits_path():
fits_name = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41., 2., 1)
assert_array_almost_equal_nulp(xp, 41., 10)
assert_array_almost_equal_nulp(yp, 2., 10)
# Valid WCS
w = wcs.WCS({'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRPIX1': 1,
'CRPIX2': 1,
'CRVAL1': 40.,
'CRVAL2': 0.,
'CDELT1': -0.1,
'CDELT2': 0.1})
xp, yp = w.wcs_world2pix(41., 2., 0)
assert_array_almost_equal_nulp(xp, -10., 10)
assert_array_almost_equal_nulp(yp, 20., 10)
@raises(TypeError)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(
data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
with pytest.raises(ValueError) as exc:
xw, yw = w.wcs_pix2world(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError) as exc:
xp, yp = w.wcs_world2pix(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
xw, = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
# Issue #1395
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError) as exc:
xy2 = w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
xy = np.random.random((2, 1))
with pytest.raises(ValueError) as exc:
xy2 = w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
def test_warning_about_defunct_keywords():
def run():
header = get_pkg_data_contents(
'data/defunct_keywords.hdr', encoding='binary')
w = wcs.WCS(header)
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 4
for item in w:
assert 'PCi_ja' in str(item.message)
# Make sure the warnings come out every time...
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 4
for item in w:
assert 'PCi_ja' in str(item.message)
def test_warning_about_defunct_keywords_exception():
def run():
header = get_pkg_data_contents(
'data/defunct_keywords.hdr', encoding='binary')
w = wcs.WCS(header)
with pytest.raises(wcs.FITSFixedWarning):
warnings.simplefilter("error", wcs.FITSFixedWarning)
run()
# Restore warnings filter to previous state
warnings.simplefilter("default")
def test_to_header_string():
header_string = """
    WCSAXES =                    2 / Number of coordinate axes                      CRPIX1  =                  0.0 / Pixel coordinate of reference point            CRPIX2  =                  0.0 / Pixel coordinate of reference point            CDELT1  =                  1.0 / Coordinate increment at reference point        CDELT2  =                  1.0 / Coordinate increment at reference point        CRVAL1  =                  0.0 / Coordinate value at reference point            CRVAL2  =                  0.0 / Coordinate value at reference point            LATPOLE =                 90.0 / [deg] Native latitude of celestial pole        END"""
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if 'COMMENT' in h0:
del h0['COMMENT']
if '' in h0:
del h0['']
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-8:]
def test_to_header_warning():
fits_name = get_pkg_data_filename('data/sip.fits')
x = wcs.WCS(fits_name)
with catch_warnings() as w:
x.to_header()
assert len(w) == 1
assert 'A_ORDER' in str(w[0])
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
assert 'COMMENT' not in header
wkey = 'P'
header = w.to_header(key=wkey)
assert wkey not in header
assert 'COMMENT' not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
@raises(wcs.InvalidTransformError)
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
wcses = wcs.find_all_wcs(header, fix=False)
def test_validate():
with catch_warnings():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = repr(results)
version = wcs._wcs.__version__
if version[0] == '5':
if version >= '5.13':
filename = 'data/validate.5.13.txt'
else:
filename = 'data/validate.5.0.txt'
else:
filename = 'data/validate.txt'
with open(get_pkg_data_filename(filename), "r") as fd:
lines = fd.readlines()
assert set([x.strip() for x in lines]) == set([
x.strip() for x in results_txt.splitlines()])
def test_validate_with_2_wcses():
# From Issue #2053
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[ -2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
def test_all_world2pix(fname=None, ext=0,
tolerance=1.0e-4, origin=0,
random_npts=25000,
adaptive=False, maxiter=20,
detect_divergence=True):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename('data/j94f05bgq_flt.fits')
ext = ('SCI', 1)
if not os.path.isfile(fname):
raise OSError("Input file '{:s}' to 'test_all_world2pix' not found."
.format(fname))
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7. / 16 * crpix).astype(int))
naxesi_u = list((9. / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack([i.flatten() for i in
np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0]
    # Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
mwidth = 2 * (crpix * 1. / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world, origin, tolerance=tolerance, adaptive=adaptive,
maxiter=maxiter, detect_divergence=detect_divergence)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print("There are {} diverging solutions.".format(ndiv))
print("Indices of diverging solutions:\n{}"
.format(e.divergent))
print("Diverging solutions:\n{}\n"
.format(e.best_solution[e.divergent]))
print("Mean radius of the diverging solutions: {}"
.format(np.mean(
np.linalg.norm(e.best_solution[e.divergent], axis=1))))
print("Mean accuracy of the diverging solutions: {}\n"
.format(np.mean(
np.linalg.norm(e.accuracy[e.divergent], axis=1))))
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print("There are {} slowly converging solutions."
.format(nslow))
print("Indices of slowly converging solutions:\n{}"
.format(e.slow_conv))
print("Slowly converging solutions:\n{}\n"
.format(e.best_solution[e.slow_conv]))
else:
print("There are no slowly converging solutions.\n")
print("There are {} converged solutions."
.format(e.best_solution.shape[0] - ndiv - nslow))
print("Best solutions (all points):\n{}"
.format(e.best_solution))
print("Accuracy:\n{}\n".format(e.accuracy))
print("\nFinished running 'test_all_world2pix' with errors.\n"
"ERROR: {}\nRun time: {}\n"
.format(e.args[0], runtime_end - runtime_begin))
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print("\nFinished running 'test_all_world2pix'.\n"
"Mean error = {0:e} (Max error = {1:e})\n"
"Run time: {2}\n"
.format(meanerr, maxerr, runtime_end - runtime_begin))
assert(maxerr < 2.0 * tolerance)
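    # A minimal sketch (not the astropy implementation) of the fixed-point
    # iteration all_world2pix performs: start from the linear inverse and
    # correct by the residual of the full forward transform, e.g.
    #     pix = w.wcs_world2pix(world, origin)
    #     for _ in range(maxiter):
    #         pix += (w.wcs_world2pix(world, origin) -
    #                 w.wcs_world2pix(w.all_pix2world(pix, origin), origin))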
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents('data/validate.fits', encoding='binary')
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
w = wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents(
'data/unit.hdr', encoding='binary')
w = wcs.WCS(header)
assert w.wcs.cunit[2] == 'm/s'
def test_footprint_to_file(tmpdir):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
w = wcs.WCS({'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg',
'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02,
'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg',
'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01,
'PV2_1': 1., 'PV2_3': 220.})
testfile = str(tmpdir.join('test.txt'))
w.footprint_to_file(testfile)
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'ICRS\n'
assert 'color=green' in lines[3]
w.footprint_to_file(testfile, coordsys='FK5', color='red')
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'FK5\n'
assert 'color=red' in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys='FOO')
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h['RADESYSA'] = 'ICRS'
h['PV2_1'] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception:
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents(
'data/invalid_header.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
# Both lines are in here, because 0.4 calls .set within WCS.__init__,
# whereas 0.3 and earlier did not.
w = wcs.WCS(header, _do_set=False)
c = w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary')
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
fits = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142]])
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
""" Test calc_footprint without distortion. """
fits = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948]])
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
""" Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5],
[0.1, 0.5],
[359.9, 0.5],
[359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
w = wcs.WCS(header)
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_printwcs():
"""
Just make sure that it runs
"""
h = get_pkg_data_contents('spectra/orion-freq-1.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
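# (The requested sky position is far from CRVAL, outside the region
# where the TAN projection is invertible, so the inverse transform
# returns NaN rather than a spurious pixel coordinate.)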
def test_no_iteration():
# Regression test for #3066
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'WCS' object is not iterable"
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'NewWCS' object is not iterable"
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding='binary')
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1))
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1))
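# (SIP and TPV are two conventions for encoding the same polynomial
# distortion, so well-matched headers should agree at the reference
# pixel and survive a to_header round trip, as checked above.)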
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
hdulist = fits.open(path)
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
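# The self-assignments below exercise the corresponding property
# setters, again purely for coverage.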
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048., 1024.])
wcs.WCS(hdulist[1].header, hdulist)
hdulist.close()
def test_list_naxis():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
hdulist = fits.open(path)
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist, naxis=['celestial'])
assert w.naxis == 2
assert w.wcs.naxis == 2
path = get_pkg_data_filename("maps/1904-66_SIN.hdr")
with open(path, 'rb') as fd:
content = fd.read()
w = wcs.WCS(content, naxis=['celestial'])
assert w.naxis == 2
assert w.wcs.naxis == 2
w = wcs.WCS(content, naxis=['spectral'])
assert w.naxis == 0
assert w.wcs.naxis == 0
hdulist.close()
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
w = wcs.WCS(hdr)
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header['CRVAL{0}'.format(ii + 1)] == w.wcs.crval[ii]
assert header['CDELT{0}'.format(ii + 1)] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
w.wcs.cunit = ['deg', 'deg', 'Hz']
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header['CRVAL{0}'.format(ii + 1)] == w.wcs.crval[ii]
assert header['CDELT{0}'.format(ii + 1)] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.set()
header = w.to_header()
assert header['CRVAL1'] != w.wcs.crval[0]
assert header['CRVAL2'] != w.wcs.crval[1]
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header['CRVAL1'] == w.wcs.crval[0]
assert header['CRVAL2'] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing an ImageHDU or PrimaryHDU and comparing it with a
WCS initialized from the header. For #4493.
"""
path = get_pkg_data_filename('data/validate.fits')
hdulist = fits.open(path)
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
hdulist.close()
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
w = wcs.WCS(hdr)
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert 'A_0_2' not in newhdr
# CTYPE should not include "-SIP" if relax is False
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(key="C")
assert 'A_0_2' not in newhdr
# Test writing header with a different key
wnew = wcs.WCS(newhdr, key='C')
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
assert 'A_0_2' in newhdr
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
w = wcs.WCS(hdr)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
newhdr = w.to_header(relax=True)
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
w.wcs.bounds_check(False, False)
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
assert w._naxis1 == 1000
assert w._naxis2 == 500
w._naxis1 = 99
w._naxis2 = 59
assert w._naxis == [99, 59]
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, CTYPE with
that key is looked at and not the primary CTYPE.
Fix for #5443.
"""
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key='A')
h2 = w.to_header(relax=False)
h1['CTYPE1A'] = "RA---SIN-SIP"
h1['CTYPE2A'] = "DEC--SIN-SIP"
h1.update(h2)
w = wcs.WCS(h1, key='A')
assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename('data/dist.fits')
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test SIP reading with an alternate key.
"""
hdr_name = get_pkg_data_filename('data/sip-broken.hdr')
header = fits.Header.fromfile(hdr_name)
del header[str("CRPIX1")]
del header[str("CRPIX2")]
w=wcs.WCS(header=header,key="A")
assert isinstance( w.sip, wcs.Sip )
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
|
53a868e65ef27abc94b332511daf313f8354b4c49f4218b01cae0513a19ebec9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import subprocess
import sys
import pytest
def test_wcsapi_extension(tmpdir):
# Test that we can build a simple C extension with the astropy.wcs C API
build_dir = tmpdir.mkdir('build').strpath
install_dir = tmpdir.mkdir('install').strpath
setup_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(
os.path.join(setup_path, '..', '..', '..', '..'))
env = os.environ.copy()
paths = [install_dir, astropy_path]
if env.get('PYTHONPATH'):
paths.append(env.get('PYTHONPATH'))
env[str('PYTHONPATH')] = str(os.pathsep.join(paths))
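# (The fresh install directory and the astropy source tree are prepended
# so the subprocess can import both the new extension and astropy.)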
# Build the extension
# This used to use subprocess.check_call, but on Python 3.4 there was
# a mysterious Heisenbug causing this to fail with a non-zero exit code
# *unless* the output is redirected. This bug also did not occur in an
# interactive session, so it likely had something to do with pytest's
# output capture.
p = subprocess.Popen([sys.executable, 'setup.py', 'build',
'--build-base={0}'.format(build_dir), 'install',
'--install-lib={0}'.format(install_dir),
astropy_path], cwd=setup_path, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Whether the process fails or not, this isn't likely to produce a great
# deal of output, so communicate() should be fine in almost all cases.
stdout, stderr = p.communicate()
try:
stdout, stderr = stdout.decode('utf8'), stderr.decode('utf8')
except UnicodeDecodeError:
# Don't try to guess about encoding; just display the text
stdout, stderr = stdout.decode('latin1'), stderr.decode('latin1')
# If compilation fails, we can skip this test, since the
# dependencies necessary to compile an extension may be missing.
# If it passes, however, we want to continue and ensure that the
# extension created is actually usable. However, if we're on
# Travis-CI, or another generic continuous integration setup, we
# don't want to ever skip, because having it fail in that
# environment probably indicates something more serious that we
# want to know about.
if (not (str('CI') in os.environ or
str('TRAVIS') in os.environ or
str('CONTINUOUS_INTEGRATION') in os.environ) and
p.returncode):
pytest.skip("system unable to compile extensions")
return
assert p.returncode == 0, (
"setup.py exited with non-zero return code {0}\n"
"stdout:\n\n{1}\n\nstderr:\n\n{2}\n".format(
p.returncode, stdout, stderr))
code = """
import sys
import wcsapi_test
sys.exit(wcsapi_test.test())
"""
code = code.strip().replace('\n', '; ')
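# (Collapsing the snippet onto one line with '; ' lets it be passed via
# ``python -c`` without worrying about embedded newlines.)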
# Import and run the extension
subprocess.check_call([sys.executable, '-c', code], env=env)
|
9e254521084bab2ed1b5fdefc512caf0d639c59b9a32ab7a6039f8c56a20b8e3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import numpy as np
import operator
import pytest
from datetime import timedelta
from .. import (Time, TimeDelta, OperandTypeError, ScaleValueError,
TIME_SCALES, STANDARD_TIME_SCALES, TIME_DELTA_SCALES)
from ... import units as u
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
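# (Note on the tolerances: 2. ** -52 is the double-precision machine
# epsilon, np.finfo(float).eps, so these helpers demand agreement to
# essentially the last representable bit; the atol values translate
# that into ~20 ps at the scale of one day.)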
class TestTimeDelta():
"""Test TimeDelta class"""
def setup(self):
self.t = Time('2010-01-01', scale='utc')
self.t2 = Time('2010-01-02 00:00:01', scale='utc')
self.t3 = Time('2010-01-03 01:02:03', scale='utc', precision=9,
in_subfmt='date_hms', out_subfmt='date_hm',
location=(-75.*u.degree, 30.*u.degree, 500*u.m))
self.t4 = Time('2010-01-01', scale='local')
self.dt = TimeDelta(100.0, format='sec')
self.dt_array = TimeDelta(np.arange(100, 1000, 100), format='sec')
def test_sub(self):
# time - time
dt = self.t2 - self.t
assert (repr(dt).startswith("<TimeDelta object: scale='tai' "
"format='jd' value=1.00001157407"))
assert allclose_jd(dt.jd, 86401.0 / 86400.0)
assert allclose_sec(dt.sec, 86401.0)
# time - delta_time
t = self.t2 - dt
assert t.iso == self.t.iso
# delta_time - delta_time
dt2 = dt - self.dt
assert allclose_sec(dt2.sec, 86301.0)
# delta_time - time
with pytest.raises(OperandTypeError):
dt - self.t
def test_add(self):
# time + time
with pytest.raises(OperandTypeError):
self.t2 + self.t
# time + delta_time
dt = self.t2 - self.t
t2 = self.t + dt
assert t2.iso == self.t2.iso
# delta_time + delta_time
dt2 = dt + self.dt
assert allclose_sec(dt2.sec, 86501.0)
# delta_time + time
dt = self.t2 - self.t
t2 = dt + self.t
assert t2.iso == self.t2.iso
def test_add_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format='mjd', scale='utc')
t2 = Time([0.0, 1.0], format='mjd', scale='utc')
dt = TimeDelta(100.0, format='jd')
dt2 = TimeDelta([100.0, 200.0], format='jd')
out = t + dt
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = t + dt2
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = t2 + dt
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt + dt
assert allclose_jd(out.jd, 200.0)
assert out.isscalar
out = dt + dt2
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
# Reverse the argument order
out = dt + t
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = dt2 + t
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = dt + t2
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt2 + dt
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
def test_sub_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
a time is a scalar or a vector"""
t = Time(0.0, format='mjd', scale='utc')
t2 = Time([0.0, 1.0], format='mjd', scale='utc')
dt = TimeDelta(100.0, format='jd')
dt2 = TimeDelta([100.0, 200.0], format='jd')
out = t - dt
assert allclose_jd(out.mjd, -100.0)
assert out.isscalar
out = t - dt2
assert allclose_jd(out.mjd, [-100.0, -200.0])
assert not out.isscalar
out = t2 - dt
assert allclose_jd(out.mjd, [-100.0, -99.0])
assert not out.isscalar
out = dt - dt
assert allclose_jd(out.jd, 0.0)
assert out.isscalar
out = dt - dt2
assert allclose_jd(out.jd, [0.0, -100.0])
assert not out.isscalar
@pytest.mark.parametrize('values', [(2455197.5, 2455198.5),
([2455197.5], [2455198.5])])
def test_copy_timedelta(self, values):
"""Test copying the values of a TimeDelta object by passing it into the
Time initializer.
"""
val1, val2 = values
t = Time(val1, format='jd', scale='utc')
t2 = Time(val2, format='jd', scale='utc')
dt = t2 - t
dt2 = TimeDelta(dt, copy=False)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is dt2._time.jd1
assert dt._time.jd2 is dt2._time.jd2
dt2 = TimeDelta(dt, copy=True)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is not dt2._time.jd1
assert dt._time.jd2 is not dt2._time.jd2
# Include initializers
dt2 = TimeDelta(dt, format='sec')
assert allclose_sec(dt2.value, 86400.0)
def test_neg_abs(self):
for dt in (self.dt, self.dt_array):
dt2 = -dt
assert np.all(dt2.jd == -dt.jd)
dt3 = abs(dt)
assert np.all(dt3.jd == dt.jd)
dt4 = abs(dt2)
assert np.all(dt4.jd == dt.jd)
def test_mul_div(self):
for dt in (self.dt, self.dt_array):
dt2 = dt + dt + dt
dt3 = 3. * dt
assert allclose_jd(dt2.jd, dt3.jd)
dt4 = dt3 / 3.
assert allclose_jd(dt4.jd, dt.jd)
dt5 = self.dt * np.arange(3)
assert dt5[0].jd == 0.
assert dt5[-1].jd == (self.dt + self.dt).jd
with pytest.raises(OperandTypeError):
self.dt * self.dt
with pytest.raises(OperandTypeError):
self.dt * self.t
def test_keep_properties(self):
# closes #1924 (partially)
dt = TimeDelta(1000., format='sec')
for t in (self.t, self.t3):
ta = t + dt
assert ta.location is t.location
assert ta.precision == t.precision
assert ta.in_subfmt == t.in_subfmt
assert ta.out_subfmt == t.out_subfmt
tr = dt + t
assert tr.location is t.location
assert tr.precision == t.precision
assert tr.in_subfmt == t.in_subfmt
assert tr.out_subfmt == t.out_subfmt
ts = t - dt
assert ts.location is t.location
assert ts.precision == t.precision
assert ts.in_subfmt == t.in_subfmt
assert ts.out_subfmt == t.out_subfmt
t_tdb = self.t.tdb
assert hasattr(t_tdb, '_delta_tdb_tt')
assert not hasattr(t_tdb, '_delta_ut1_utc')
t_tdb_ut1 = t_tdb.ut1
assert hasattr(t_tdb_ut1, '_delta_tdb_tt')
assert hasattr(t_tdb_ut1, '_delta_ut1_utc')
t_tdb_ut1_utc = t_tdb_ut1.utc
assert hasattr(t_tdb_ut1_utc, '_delta_tdb_tt')
assert hasattr(t_tdb_ut1_utc, '_delta_ut1_utc')
# adding or subtracting some time should remove the delta's
# since these are time-dependent and should be recalculated
for op in (operator.add, operator.sub):
t1 = op(t_tdb, dt)
assert not hasattr(t1, '_delta_tdb_tt')
assert not hasattr(t1, '_delta_ut1_utc')
t2 = op(t_tdb_ut1, dt)
assert not hasattr(t2, '_delta_tdb_tt')
assert not hasattr(t2, '_delta_ut1_utc')
t3 = op(t_tdb_ut1_utc, dt)
assert not hasattr(t3, '_delta_tdb_tt')
assert not hasattr(t3, '_delta_ut1_utc')
def test_set_format(self):
"""
Test basics of setting format attribute.
"""
dt = TimeDelta(86400.0, format='sec')
assert dt.value == 86400.0
assert dt.format == 'sec'
dt.format = 'jd'
assert dt.value == 1.0
assert dt.format == 'jd'
dt.format = 'datetime'
assert dt.value == timedelta(days=1)
assert dt.format == 'datetime'
class TestTimeDeltaScales():
"""Test scale conversion for Time Delta.
Go through @taldcroft's list of expected behaviour from #1932"""
def setup(self):
# pick a date that includes a leap second for better testing
self.iso_times = ['2012-06-30 12:00:00', '2012-06-30 23:59:59',
'2012-07-01 00:00:00', '2012-07-01 12:00:00']
self.t = dict((scale, Time(self.iso_times, scale=scale, precision=9))
for scale in TIME_SCALES)
self.dt = dict((scale, self.t[scale]-self.t[scale][0])
for scale in TIME_SCALES)
def test_delta_scales_definition(self):
for scale in list(TIME_DELTA_SCALES) + [None]:
TimeDelta([0., 1., 10.], format='sec', scale=scale)
with pytest.raises(ScaleValueError):
TimeDelta([0., 1., 10.], format='sec', scale='utc')
@pytest.mark.parametrize(('scale1', 'scale2'),
list(itertools.product(STANDARD_TIME_SCALES,
STANDARD_TIME_SCALES)))
def test_standard_scales_for_time_minus_time(self, scale1, scale2):
"""T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X)
and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X
I.e., time differences of two times should have the scale of the
first time. The one exception is UTC, which returns TAI.
There are no standard timescales for which this does not work.
"""
t1 = self.t[scale1]
t2 = self.t[scale2]
dt = t1 - t2
if scale1 in TIME_DELTA_SCALES:
assert dt.scale == scale1
else:
assert scale1 == 'utc'
assert dt.scale == 'tai'
# now check with delta time; also check reversibility
t1_recover_t2_scale = t2 + dt
assert t1_recover_t2_scale.scale == scale2
t1_recover = getattr(t1_recover_t2_scale, scale1)
assert allclose_jd(t1_recover.jd, t1.jd)
t2_recover_t1_scale = t1 - dt
assert t2_recover_t1_scale.scale == scale1
t2_recover = getattr(t2_recover_t1_scale, scale2)
assert allclose_jd(t2_recover.jd, t2.jd)
def test_local_scales_for_time_minus_time(self):
""" T1(local) - T2(local) should return dT(local)
T1(local) +/- dT(local) or T1(local) +/- Quantity(time-like) should
also return T(local)
I.e., tests that the difference of two local-scale times returns a
delta time with the local timescale. Furthermore, checks that
arithmetic of T(local) with dT(None) or a time-like quantity works.
Also tests that subtracting two Time objects, one with the local time
scale and the other with a standard time scale, raises TypeError.
"""
t1 = self.t['local']
t2 = Time('2010-01-01', scale='local')
dt = t1 - t2
assert dt.scale == 'local'
# now check with delta time
t1_recover = t2 + dt
assert t1_recover.scale == 'local'
assert allclose_jd(t1_recover.jd, t1.jd)
# check that dT(None) can be subtracted from T(local)
dt2 = TimeDelta([10.], format='sec', scale=None)
t3 = t2 - dt2
assert t3.scale == t2.scale
# check that time quantity can be subtracted from T(local)
q = 10 * u.s
assert (t2 - q).value == (t2 - dt2).value
# Check that one cannot subtract/add times with a standard scale
# from a local one (or vice versa)
t1 = self.t['local']
for scale in STANDARD_TIME_SCALES:
t2 = self.t[scale]
with pytest.raises(TypeError):
t1 - t2
with pytest.raises(TypeError):
t2 - t1
with pytest.raises(TypeError):
t2 - dt
with pytest.raises(TypeError):
t2 + dt
with pytest.raises(TypeError):
dt + t2
def test_scales_for_delta_minus_delta(self):
"""dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X
I.e. this will succeed if dT(Y) can be converted to scale X.
Returns delta time in scale X
"""
# geocentric timescales
dt_tai = self.dt['tai']
dt_tt = self.dt['tt']
dt0 = dt_tai - dt_tt
assert dt0.scale == 'tai'
# tai and tt have the same scale, so differences should be the same
assert allclose_sec(dt0.sec, 0.)
dt_tcg = self.dt['tcg']
dt1 = dt_tai - dt_tcg
assert dt1.scale == 'tai'
# tai and tcg do not have the same rate, so the differences are nonzero
assert not allclose_sec(dt1.sec, 0.)
t_tai_tcg = self.t['tai'].tcg
dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
dt2 = dt_tai - dt_tai_tcg
assert dt2.scale == 'tai'
# but if tcg difference calculated from tai, it should roundtrip
assert allclose_sec(dt2.sec, 0.)
# check that if we put TCG first, we get a TCG scale back
dt3 = dt_tai_tcg - dt_tai
assert dt3.scale == 'tcg'
assert allclose_sec(dt3.sec, 0.)
for scale in 'tdb', 'tcb', 'ut1':
with pytest.raises(TypeError):
dt_tai - self.dt[scale]
# barycentric timescales
dt_tcb = self.dt['tcb']
dt_tdb = self.dt['tdb']
dt4 = dt_tcb - dt_tdb
assert dt4.scale == 'tcb'
assert not allclose_sec(dt4.sec, 0.)
t_tcb_tdb = self.t['tcb'].tdb
dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
dt5 = dt_tcb - dt_tcb_tdb
assert dt5.scale == 'tcb'
assert allclose_sec(dt5.sec, 0.)
for scale in 'utc', 'tai', 'tt', 'tcg', 'ut1':
with pytest.raises(TypeError):
dt_tcb - self.dt[scale]
# rotational timescale
dt_ut1 = self.dt['ut1']
dt5 = dt_ut1 - dt_ut1[-1]
assert dt5.scale == 'ut1'
assert dt5[-1].sec == 0.
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb':
with pytest.raises(TypeError):
dt_ut1 - self.dt[scale]
# local time scale
dt_local = self.dt['local']
dt6 = dt_local - dt_local[-1]
assert dt6.scale == 'local'
assert dt6[-1].sec == 0.
for scale in 'utc', 'tai', 'tt', 'tcg', 'tcb', 'tdb', 'ut1':
with pytest.raises(TypeError):
dt_local - self.dt[scale]
@pytest.mark.parametrize(
('scale', 'op'), list(itertools.product(TIME_SCALES,
(operator.add, operator.sub))))
def test_scales_for_delta_scale_is_none(self, scale, op):
"""T(X) +/- dT(None) or T(X) +/- Quantity(time-like)
This is always allowed and just adds JDs, i.e., the scale of
the TimeDelta or time-like Quantity will be taken to be X.
The one exception is again for X=UTC, where TAI is assumed instead,
so that a day is always defined as 86400 seconds.
"""
dt_none = TimeDelta([0., 1., -1., 1000.], format='sec')
assert dt_none.scale is None
q_time = dt_none.to('s')
dt = self.dt[scale]
dt1 = op(dt, dt_none)
assert dt1.scale == dt.scale
assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
dt2 = op(dt_none, dt)
assert dt2.scale == dt.scale
assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
dt3 = op(q_time, dt)
assert dt3.scale == dt.scale
assert allclose_jd(dt3.jd, dt2.jd)
t = self.t[scale]
t1 = op(t, dt_none)
assert t1.scale == t.scale
assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
if op is operator.add:
t2 = op(dt_none, t)
assert t2.scale == t.scale
assert allclose_jd(t2.jd, t1.jd)
t3 = op(t, q_time)
assert t3.scale == t.scale
assert allclose_jd(t3.jd, t1.jd)
@pytest.mark.parametrize('scale', TIME_SCALES)
def test_delta_day_is_86400_seconds(self, scale):
"""TimeDelta or Quantity holding 1 day always means 24*60*60 seconds
This holds true for all timescales but UTC, for which leap-second
days are longer or shorter by one second.
"""
t = self.t[scale]
dt_day = TimeDelta(1., format='jd')
q_day = dt_day.to('day')
dt_day_leap = t[-1] - t[0]
# ^ = exclusive or, so either equal and not UTC, or not equal and UTC
assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == 'utc')
t1 = t[0] + dt_day
assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == 'utc')
t2 = q_day + t[0]
assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == 'utc')
t3 = t[-1] - dt_day
assert allclose_jd(t3.jd, t[0].jd) ^ (scale == 'utc')
t4 = t[-1] - q_day
assert allclose_jd(t4.jd, t[0].jd) ^ (scale == 'utc')
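# Illustrative sketch (an addition, not part of the original suite):
# the scale rules exercised by TestTimeDeltaScales in their simplest
# form -- a difference of two UTC times comes back on the TAI scale,
# while for other standard scales it keeps the scale of the first
# operand.
def test_scale_rule_sketch():
    t1 = Time('2012-06-30 12:00:00', scale='utc')
    t2 = Time('2012-07-01 12:00:00', scale='utc')
    assert (t2 - t1).scale == 'tai'
    t1_tt = Time('2012-06-30 12:00:00', scale='tt')
    t2_tt = Time('2012-07-01 12:00:00', scale='tt')
    assert (t2_tt - t1_tt).scale == 'tt'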
def test_timedelta_setitem():
t = TimeDelta([1, 2, 3] * u.d, format='jd')
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 86400 * u.s
assert allclose_jd(t.value, [1, 1, 1])
t[1] = TimeDelta(2, format='jd')
assert allclose_jd(t.value, [1, 2, 1])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert 'cannot convert value to a compatible TimeDelta' in str(err)
def test_timedelta_mask():
t = TimeDelta([1, 2] * u.d, format='jd')
t[1] = np.ma.masked
assert np.all(t.mask == [False, True])
assert allclose_jd(t[0].value, 1)
assert t.value[1] is np.ma.masked
def test_python_timedelta_scalar():
td = timedelta(days=1, seconds=1)
td1 = TimeDelta(td, format='datetime')
assert td1.sec == 86401.0
td2 = TimeDelta(86401.0, format='sec')
assert td2.datetime == td
def test_python_timedelta_vector():
td = [[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)]]
td1 = TimeDelta(td, format='datetime')
assert np.all(td1.jd == [[1, 2], [3, 4]])
td2 = TimeDelta([[1, 2], [3, 4]], format='jd')
assert np.all(td2.datetime == td)
def test_timedelta_to_datetime():
td = TimeDelta(1, format='jd')
assert td.to_datetime() == timedelta(days=1)
td2 = TimeDelta([[1, 2], [3, 4]], format='jd')
td = [[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)]]
assert np.all(td2.to_datetime() == td)
|
2917e0423dc7307a430925cbc8d3fd70d977b2fc36cfca88ed957569742620b8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import functools
import datetime
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose
from ...tests.helper import catch_warnings, pytest
from ...utils import isiterable
from .. import Time, ScaleValueError, STANDARD_TIME_SCALES, TimeString, TimezoneInfo
from ...coordinates import EarthLocation
from ... import units as u
from ... import _erfa as erfa
from ...table import Column
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
allclose_year = functools.partial(np.allclose, rtol=2. ** -52,
atol=0.) # 14 microsec at current epoch
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
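# The setup/teardown pair above snapshots and restores Time.FORMATS so
# that tests which register custom formats cannot leak state between
# tests.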
class TestBasic():
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+0.00037179926839122024,
-0.5+0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD1 are now with respect to TT scale)"""
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array"""
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.)/10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5 = t4[3]
assert t5.location == t4.location[3]
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000(TAI)'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.500000'
assert t.ut1.iso == '2006-01-15 21:24:37.834100'
assert t.tai.iso == '2006-01-15 21:25:10.500000'
assert t.tt.iso == '2006-01-15 21:25:42.684000'
assert t.tcg.iso == '2006-01-15 21:25:43.322690'
assert t.tdb.iso == '2006-01-15 21:25:42.684373'
assert t.tcb.iso == '2006-01-15 21:25:56.893952'
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5']*3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time('2000-01-01T12:23:34.0(TDB)', format='fits')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats.
Transformation to formats with a reference time should raise
ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001/3600./24./365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500(LOCAL)'
assert_allclose(t.byear, 2006.04217888831, atol=0.001/3600./24./365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001/3600./24./365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
# epochTimeFormats
with pytest.raises(ScaleValueError):
t2 = t.gps
with pytest.raises(ScaleValueError):
t2 = t.unix
with pytest.raises(ScaleValueError):
t2 = t.cxcsec
with pytest.raises(ScaleValueError):
t2 = t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = '{:04d}-{:02d}'.format(year, month)
yyyy_mm_dd = '{:04d}-{:02d}-{:02d}'.format(year, month, day)
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = '{:04d}-07-01'.format(year)
else:
yyyy_mm_dd_plus1 = '{:04d}-01-01'.format(year+1)
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
t6 = Time(t1, scale='local')
class TestVal2():
"""Tests related to val2"""
def test_val2_ignored(self):
"""Test that val2 is ignored for string input"""
t = Time('2001:001', 'ignored', scale='utc')
assert t.yday == '2001:001:00:00:00.000'
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
class TestSubFormat():
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000(TAI)',
'2000-01-01T01:01:01.000(TAI)',
'2000-01-01T01:01:01.123(TAI)']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000(UTC)',
'+02000-01-01T01:01:01.000(UTC)',
'+02000-01-01T01:01:01.123(UTC)']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000(TAI)',
'+02000-01-01T01:01:01.000(TAI)',
'-00594-01-01T00:00:00.000(TAI)']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000(TAI)',
'+02000-01-01T01:01:01.000(TAI)',
'+10594-01-01T00:00:00.000(TAI)']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that scale gets interpreted correctly for FITS strings."""
t = Time('2000-01-02(TAI)')
assert t.scale == 'tai'
# Test deprecated scale.
t = Time('2000-01-02(IAT)')
assert t.scale == 'tai'
# Test with scale and FITS string scale
t = Time('2045-11-08T00:00:00.000(UTC)', scale='utc')
assert t.scale == 'utc'
# Test with local time scale and FITS string scale
t = Time('2045-11-08T00:00:00.000(LOCAL)')
assert t.scale == 'local'
# Check that inconsistent scales lead to errors.
with pytest.raises(ValueError):
Time('2000-01-02(TAI)', scale='utc')
with pytest.raises(ValueError):
Time(['2000-01-02(TAI)', '2001-02-03(UTC)'])
# Check that inconsistent FITS string scales lead to errors.
with pytest.raises(ValueError):
Time(['2000-01-02(TAI)', '2001-02-03(IAT)'])
# Check that inconsistent realizations lead to errors.
with pytest.raises(ValueError):
Time(['2000-01-02(ET(NIST))', '2001-02-03(ET)'])
def test_fits_scale_representation(self):
t = Time('1960-01-02T03:04:05.678(ET(NIST))')
assert t.scale == 'tt'
assert t.value == '1960-01-02T03:04:05.678(ET(NIST))'
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('J2000', '2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'utc'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
# Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Value from:
# d = datetime.datetime(2000, 1, 1)
# matplotlib.pylab.dates.date2num(d)
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, 730120.0, atol=1e-5, rtol=0)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
class TestSofaErrors():
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with catch_warnings() as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert 'bad day (JD computed)' in str(w[0].message)
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate():
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
assert t.location.x == t_loc_x # prove that it changed
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd')
assert t.fits == '0001-01-01T00:00:00.000(UTC)'
t = Time(1721425.5 - 366., format='jd')
assert t.fits == '+00000-01-01T00:00:00.000(UTC)'
t = Time(1721425.5 - 366. - 365., format='jd')
assert t.fits == '-00001-01-01T00:00:00.000(UTC)'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000(TAI)'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000(TAI)'
t = Time(5373484.5, -1./24./3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000(TAI)'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion():
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000(UTC)' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10*u.hour, tzname='US/Hawaii')
    # The line above produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError) as e:
Time('2015-06-30 23:59:60.000').to_datetime()
    assert 'does not support leap seconds' in str(e.value)
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
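# Judging from the accesses above, the cache is a two-level dict stored on the
# internal _time object, keyed first by kind and then by name, roughly:
#     t.cache == {'format': {'iso': <iso strings>}, 'scale': {'tai': <Time>}}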
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
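# In the (day, fraction) convention jd1 carries the day part and jd2 a small
# fraction, so the pair is stored as 2451545.0 + 0.0 rather than an arbitrary
# but equivalent split such as 2451544.5 + 0.5.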
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[['{:04d}-{:02d}-{:02d}'.format(y, m, d) for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
    # Regression tests for arrays with entries of unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is not writeable
t = Time('2000:001', scale='utc')
with pytest.raises(ValueError) as err:
t[()] = '2000:002'
assert 'scalar Time object is read-only.' in str(err)
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
    # Ref: Issue gh-7449 and PR gh-7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
|
6d3ce12cc6d2e5266b777d7f9b9ed6fafec4d36bd4aebed20d166e98f285e9ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import pytest
import numpy as np
from .. import Time, TimeDelta
from ... import units as u
from ...table import Column
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
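# 2. ** -52 is the float64 machine epsilon, so the atol above is one ulp of a
# day expressed in seconds: 2**-52 * 86400 s ~= 1.9e-11 s, i.e. the ~20 ps
# noted in the comment.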
class TestTimeQuantity():
"""Test Interaction of Time with Quantities"""
def test_valid_quantity_input(self):
"""Test Time formats that are allowed to take quantity input."""
q = 2450000.125*u.day
t1 = Time(q, format='jd', scale='utc')
assert t1.value == q.value
q2 = q.to(u.second)
t2 = Time(q2, format='jd', scale='utc')
assert t2.value == q.value == q2.to_value(u.day)
q3 = q-2400000.5*u.day
t3 = Time(q3, format='mjd', scale='utc')
assert t3.value == q3.value
# test we can deal with two quantity arguments, with different units
qs = 24.*36.*u.second
t4 = Time(q3, qs, format='mjd', scale='utc')
assert t4.value == (q3+qs).to_value(u.day)
qy = 1990.*u.yr
ty1 = Time(qy, format='jyear', scale='utc')
assert ty1.value == qy.value
ty2 = Time(qy.to(u.day), format='jyear', scale='utc')
assert ty2.value == qy.value
qy2 = 10.*u.yr
tcxc = Time(qy2, format='cxcsec')
assert tcxc.value == qy2.to_value(u.second)
tgps = Time(qy2, format='gps')
assert tgps.value == qy2.to_value(u.second)
tunix = Time(qy2, format='unix')
assert tunix.value == qy2.to_value(u.second)
qd = 2000.*365.*u.day
tplt = Time(qd, format='plot_date', scale='utc')
assert tplt.value == qd.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
Time(2450000.*u.m, format='jd', scale='utc')
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
def test_column_with_and_without_units(self):
"""Ensure a Column without a unit is treated as an array [#3648]"""
a = np.arange(50000., 50010.)
ta = Time(a, format='mjd')
c1 = Column(np.arange(50000., 50010.), name='mjd')
tc1 = Time(c1, format='mjd')
assert np.all(ta == tc1)
c2 = Column(np.arange(50000., 50010.), name='mjd', unit='day')
tc2 = Time(c2, format='mjd')
assert np.all(ta == tc2)
c3 = Column(np.arange(50000., 50010.), name='mjd', unit='m')
with pytest.raises(u.UnitsError):
Time(c3, format='mjd')
def test_no_quantity_input_allowed(self):
"""Time formats that are not allowed to take Quantity input."""
qy = 1990.*u.yr
for fmt in ('iso', 'yday', 'datetime', 'byear',
'byear_str', 'jyear_str'):
with pytest.raises(ValueError):
Time(qy, format=fmt, scale='utc')
def test_valid_quantity_operations(self):
"""Check that adding a time-valued quantity to a Time gives a Time"""
t0 = Time(100000., format='cxcsec')
q1 = 10.*u.second
t1 = t0 + q1
assert isinstance(t1, Time)
assert t1.value == t0.value+q1.to_value(u.second)
q2 = 1.*u.day
t2 = t0 - q2
assert allclose_sec(t2.value, t0.value-q2.to_value(u.second))
# check broadcasting
q3 = np.arange(15.).reshape(3, 5) * u.hour
t3 = t0 - q3
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value-q3.to_value(u.second))
def test_invalid_quantity_operations(self):
"""Check that comparisons of Time with quantities does not work
(even for time-like, since we cannot compare Time to TimeDelta)"""
with pytest.raises(TypeError):
Time(100000., format='cxcsec') > 10.*u.m
with pytest.raises(TypeError):
Time(100000., format='cxcsec') > 10.*u.second
class TestTimeDeltaQuantity():
"""Test interaction of TimeDelta with Quantities"""
def test_valid_quantity_input(self):
"""Test that TimeDelta can take quantity input."""
q = 500.25*u.day
dt1 = TimeDelta(q, format='jd')
assert dt1.value == q.value
dt2 = TimeDelta(q, format='sec')
assert dt2.value == q.to_value(u.second)
dt3 = TimeDelta(q)
assert dt3.value == q.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
TimeDelta(2450000.*u.m, format='jd')
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
with pytest.raises(TypeError):
TimeDelta(100, format='sec') > 10.*u.m
def test_quantity_output(self):
q = 500.25*u.day
dt = TimeDelta(q)
assert dt.to(u.day) == q
assert dt.to(u.second).value == q.to_value(u.second)
with pytest.raises(u.UnitsError):
dt.to(u.m)
def test_valid_quantity_operations1(self):
"""Check adding/substracting/comparing a time-valued quantity works
with a TimeDelta. Addition/subtraction should give TimeDelta"""
t0 = TimeDelta(106400., format='sec')
q1 = 10.*u.second
t1 = t0 + q1
assert isinstance(t1, TimeDelta)
assert t1.value == t0.value+q1.to_value(u.second)
q2 = 1.*u.day
t2 = t0 - q2
assert allclose_sec(t2.value, t0.value-q2.to_value(u.second))
# now comparisons
assert t0 > q1
assert t0 < 1.*u.yr
# and broadcasting
q3 = np.arange(12.).reshape(4, 3) * u.hour
t3 = t0 + q3
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))
def test_valid_quantity_operations2(self):
"""Check that TimeDelta is treated as a quantity where possible."""
t0 = TimeDelta(100000., format='sec')
f = 1./t0
assert isinstance(f, u.Quantity)
assert f.unit == 1./u.day
g = 10.*u.m/u.second**2
v = t0 * g
assert isinstance(v, u.Quantity)
assert v.decompose().unit == u.m / u.second
q = np.log10(t0/u.second)
assert isinstance(q, u.Quantity)
assert q.value == np.log10(t0.sec)
s = 1.*u.m
v = s/t0
assert isinstance(v, u.Quantity)
assert v.decompose().unit == u.m / u.second
# broadcasting
t1 = TimeDelta(np.arange(100000., 100012.).reshape(6, 2), format='sec')
f = np.array([1., 2.]) * u.cycle * u.Hz
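        # u.cycle * u.Hz has dimension cycle / s, so multiplying by t1 (in
        # seconds) gives a phase equivalent to cycles.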
phase = f * t1
assert isinstance(phase, u.Quantity)
assert phase.shape == t1.shape
assert phase.unit.is_equivalent(u.cycle)
def test_invalid_quantity_operations(self):
"""Check comparisons of TimeDelta with non-time quantities fails."""
with pytest.raises(TypeError):
TimeDelta(100000., format='sec') > 10.*u.m
def test_invalid_quantity_broadcast(self):
"""Check broadcasting rules in interactions with Quantity."""
t0 = TimeDelta(np.arange(12.).reshape(4, 3), format='sec')
with pytest.raises(ValueError):
t0 + np.arange(4.) * u.s
class TestDeltaAttributes():
def test_delta_ut1_utc(self):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc', precision=6)
t.delta_ut1_utc = 0.3 * u.s
assert t.ut1.iso == '2010-01-01 00:00:00.300000'
t.delta_ut1_utc = 0.4 / 60. * u.minute
assert t.ut1.iso == '2010-01-01 00:00:00.400000'
with pytest.raises(u.UnitsError):
t.delta_ut1_utc = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_ut1_utc = TimeDelta(0.3, format='sec')
assert t.ut1.iso == '2010-01-01 00:00:00.300000'
t.delta_ut1_utc = TimeDelta(0.5/24./3600., format='jd')
assert t.ut1.iso == '2010-01-01 00:00:00.500000'
def test_delta_tdb_tt(self):
t = Time('2010-01-01 00:00:00', format='iso', scale='tt', precision=6)
t.delta_tdb_tt = 20. * u.second
assert t.tdb.iso == '2010-01-01 00:00:20.000000'
t.delta_tdb_tt = 30. / 60. * u.minute
assert t.tdb.iso == '2010-01-01 00:00:30.000000'
with pytest.raises(u.UnitsError):
t.delta_tdb_tt = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_tdb_tt = TimeDelta(40., format='sec')
assert t.tdb.iso == '2010-01-01 00:00:40.000000'
t.delta_tdb_tt = TimeDelta(50./24./3600., format='jd')
assert t.tdb.iso == '2010-01-01 00:00:50.000000'
@pytest.mark.parametrize('q1, q2', ((5e8*u.s, None),
(5e17*u.ns, None),
(4e8*u.s, 1e17*u.ns),
(4e14*u.us, 1e17*u.ns)))
def test_quantity_conversion_rounding(q1, q2):
"""Check that no rounding errors are incurred by unit conversion.
    Rounding errors previously occurred because quantities in seconds were
    converted to days before being split into two-part doubles. See gh-7622.
"""
t = Time('2001-01-01T00:00:00.', scale='tai')
expected = Time('2016-11-05T00:53:20.', scale='tai')
if q2 is None:
t0 = t + q1
else:
t0 = t + q1 + q2
assert abs(t0 - expected) < 20 * u.ps
dt1 = TimeDelta(q1, q2)
t1 = t + dt1
assert abs(t1 - expected) < 20 * u.ps
dt2 = TimeDelta(q1, q2, format='sec')
t2 = t + dt2
assert abs(t2 - expected) < 20 * u.ps
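# A hedged sketch of the two-part-double idea this test guards (illustrative
# names only, not astropy API): keeping a large and a tiny interval in
# separate floats preserves precision that one float64 addition destroys.
def _two_part_double_sketch():
    big, small = 5e8, 1e-12           # seconds
    assert (big + small) - big == 0.  # the picosecond part is lost in one double
    jd1, jd2 = big, small             # ...but survives when kept as two parts
    return jd1, jd2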
|
0fb1e6448296fac01b85a8e99e674d8e4adfdb1f0c558fe23a94fb6180424541 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import pytest
import numpy as np
from .. import Time, TimeDelta
class TestTimeComparisons():
"""Test Comparisons of Time and TimeDelta classes"""
def setup(self):
self.t1 = Time(np.arange(49995, 50005), format='mjd', scale='utc')
self.t2 = Time(np.arange(49000, 51000, 200), format='mjd', scale='utc')
def test_miscompares(self):
"""
If an incompatible object is compared to a Time object, == should
return False and != should return True. All other comparison
operators should raise a TypeError.
"""
t1 = Time('J2000', scale='utc')
for op, op_str in ((operator.ge, '>='),
(operator.gt, '>'),
(operator.le, '<='),
(operator.lt, '<')):
with pytest.raises(TypeError) as err:
op(t1, None)
# Keep == and != as they are specifically meant to test Time.__eq__
# and Time.__ne__
assert (t1 == None) is False # nopep8
assert (t1 != None) is True # nopep8
def test_time(self):
t1_lt_t2 = self.t1 < self.t2
assert np.all(t1_lt_t2 == np.array([False, False, False, False, False,
False, True, True, True, True]))
t1_ge_t2 = self.t1 >= self.t2
assert np.all(t1_ge_t2 != t1_lt_t2)
t1_le_t2 = self.t1 <= self.t2
assert np.all(t1_le_t2 == np.array([False, False, False, False, False,
True, True, True, True, True]))
t1_gt_t2 = self.t1 > self.t2
assert np.all(t1_gt_t2 != t1_le_t2)
t1_eq_t2 = self.t1 == self.t2
assert np.all(t1_eq_t2 == np.array([False, False, False, False, False,
True, False, False, False, False]))
t1_ne_t2 = self.t1 != self.t2
assert np.all(t1_ne_t2 != t1_eq_t2)
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
t1_0_gt_t2 = self.t1[0] > self.t2
assert np.all(t1_0_gt_t2 == np.array([True, True, True, True, True,
False, False, False, False,
False]))
t1_gt_t2_0 = self.t1 > self.t2[0]
assert np.all(t1_gt_t2_0 == np.array([True, True, True, True, True,
True, True, True, True, True]))
def test_timedelta(self):
dt = self.t2 - self.t1
with pytest.raises(TypeError):
self.t1 > dt
dt_gt_td0 = dt > TimeDelta(0., format='sec')
assert np.all(dt_gt_td0 == np.array([False, False, False, False, False,
False, True, True, True, True]))
|
87b7fc74a9a473ef661dd757d3b51d9f466611a9ff17507531f8c2db03a67c00 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test utilities for `astropy.units`.
"""
import numpy as np
from numpy import finfo
from ...units.utils import sanitize_scale
from ...units.utils import quantity_asanyarray
from ...units.quantity import Quantity
_float_finfo = finfo(float)
def test_quantity_asanyarray():
array_of_quantities = [Quantity(1), Quantity(2), Quantity(3)]
quantity_array = quantity_asanyarray(array_of_quantities)
assert isinstance(quantity_array, Quantity)
array_of_integers = [1, 2, 3]
np_array = quantity_asanyarray(array_of_integers)
assert isinstance(np_array, np.ndarray)
def test_sanitize_scale():
    assert sanitize_scale(complex(2, _float_finfo.eps)) == 2
    assert sanitize_scale(complex(_float_finfo.eps, 2)) == 2j
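# Read together, the two asserts show that sanitize_scale discards a spurious
# component at the float-epsilon level, returning a clean real (2) or purely
# imaginary (2j) scale.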
|
262a417e55354a6c13990fecb8b3a6dcb028ecb031c4e01b8c02d74e36d75bff | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from ... import units as u # pylint: disable=W0611
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.arcsec),
('angle', 'angle')])
def test_args3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.arcsec),
('angle', 'angle')])
def test_args_noconvert3(solarx_unit, solary_unit):
@u.quantity_input()
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1*u.deg, 1*u.arcmin)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.deg
assert solary.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [
u.arcsec, 'angle'])
def test_args_nonquantity3(solarx_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 100)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, int)
assert solarx.unit == u.arcsec
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies3(solarx_unit, solary_unit):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary+(10*u.J) # Add an energy to check equiv is working
solarx, solary = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.gram
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(u.UnitsError) as e:
solarx, solary = myfunc_args(1*u.arcsec, 100*u.km)
str_to = str(solary_unit)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(TypeError) as e:
solarx, solary = myfunc_args(1*u.arcsec, 100)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_decorator_override():
@u.quantity_input(solarx=u.arcsec)
def myfunc_args(solarx: u.km, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit=1*u.arcsec):
return solarx, solary, myk
solarx, solary, myk = myfunc_args(1*u.arcsec, 100, myk=100*u.deg)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, int)
assert isinstance(myk, u.Quantity)
assert myk.unit == u.deg
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_unused_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit=1*u.arcsec, myk2=1000):
return solarx, solary, myk, myk2
solarx, solary, myk, myk2 = myfunc_args(1*u.arcsec, 100, myk=100*u.deg, myk2=10)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, int)
assert isinstance(myk, u.Quantity)
assert isinstance(myk2, int)
assert myk.unit == u.deg
assert myk2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies3(solarx_unit, energy):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, energy: energy=10*u.eV):
return solarx, energy+(10*u.J) # Add an energy to check equiv is working
solarx, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(solarx, u.Quantity)
assert isinstance(energy, u.Quantity)
assert solarx.unit == u.arcsec
assert energy.unit == u.gram
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
return solarx, solary
with pytest.raises(u.UnitsError) as e:
solarx, solary = myfunc_args(1*u.arcsec, solary=100*u.km)
str_to = str(solary_unit)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
return solarx, solary
with pytest.raises(TypeError) as e:
solarx, solary = myfunc_args(1*u.arcsec, solary=100)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
@pytest.mark.parametrize("solarx_unit,solary_unit", [
(u.arcsec, u.deg),
('angle', 'angle')])
def test_kwarg_default3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit=10*u.deg):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec)
def test_return_annotation():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> u.deg:
return solarx
solarx = myfunc_args(1*u.arcsec)
assert solarx.unit is u.deg
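# The return annotation makes quantity_input convert the returned value, so
# the arcsecond input comes back in degrees (1 arcsec == 1/3600 deg):
#     >>> myfunc_args(1 * u.arcsec)  # doctest: +SKIP
#     <Quantity 0.000277... deg>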
def test_return_annotation_none():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> None:
pass
solarx = myfunc_args(1*u.arcsec)
assert solarx is None
|
12f0a73f04afb445a85c7dd7f1717dbf21b3a0e873c494fbf07c9e831b295cb4 | # The purpose of these tests is to ensure that calling quantities using
# array methods returns quantities with the right units, or raises exceptions.
import pytest
import numpy as np
from ... import units as u
class TestQuantityArrayCopy:
"""
Test whether arrays are properly copied/used in place
"""
def test_copy_on_creation(self):
v = np.arange(1000.)
q_nocopy = u.Quantity(v, "km/s", copy=False)
q_copy = u.Quantity(v, "km/s", copy=True)
v[0] = -1.
assert q_nocopy[0].value == v[0]
assert q_copy[0].value != v[0]
def test_to_copies(self):
q = u.Quantity(np.arange(1., 100.), "km/s")
q2 = q.to(u.m/u.s)
assert np.all(q.value != q2.value)
q3 = q.to(u.km/u.s)
assert np.all(q.value == q3.value)
q[0] = -1.*u.km/u.s
assert q[0].value != q3[0].value
def test_si_copies(self):
q = u.Quantity(np.arange(100.), "m/s")
q2 = q.si
assert np.all(q.value == q2.value)
q[0] = -1.*u.m/u.s
assert q[0].value != q2[0].value
def test_getitem_is_view(self):
"""Check that [keys] work, and that, like ndarray, it returns
a view, so that changing one changes the other.
Also test that one can add axes (closes #1422)
"""
q = u.Quantity(np.arange(100.), "m/s")
q_sel = q[10:20]
q_sel[0] = -1.*u.m/u.s
assert q_sel[0] == q[10]
# also check that getitem can do new axes
q2 = q[:, np.newaxis]
q2[10, 0] = -9*u.m/u.s
assert np.all(q2.flatten() == q)
def test_flat(self):
q = u.Quantity(np.arange(9.).reshape(3, 3), "m/s")
q_flat = q.flat
# check that a single item is a quantity (with the right value)
assert q_flat[8] == 8. * u.m / u.s
# and that getting a range works as well
assert np.all(q_flat[0:2] == np.arange(2.) * u.m / u.s)
# as well as getting items via iteration
q_flat_list = [_q for _q in q.flat]
assert np.all(u.Quantity(q_flat_list) ==
u.Quantity([_a for _a in q.value.flat], q.unit))
# check that flat works like a view of the real array
q_flat[8] = -1. * u.km / u.s
assert q_flat[8] == -1. * u.km / u.s
assert q[2, 2] == -1. * u.km / u.s
# while if one goes by an iterated item, a copy is made
q_flat_list[8] = -2 * u.km / u.s
assert q_flat_list[8] == -2. * u.km / u.s
assert q_flat[8] == -1. * u.km / u.s
assert q[2, 2] == -1. * u.km / u.s
class TestQuantityReshapeFuncs:
"""Test different ndarray methods that alter the array shape
tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
"""
def test_reshape(self):
q = np.arange(6.) * u.m
q_reshape = q.reshape(3, 2)
assert isinstance(q_reshape, u.Quantity)
assert q_reshape.unit == q.unit
assert np.all(q_reshape.value == q.value.reshape(3, 2))
def test_squeeze(self):
q = np.arange(6.).reshape(6, 1) * u.m
q_squeeze = q.squeeze()
assert isinstance(q_squeeze, u.Quantity)
assert q_squeeze.unit == q.unit
assert np.all(q_squeeze.value == q.value.squeeze())
def test_ravel(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_ravel = q.ravel()
assert isinstance(q_ravel, u.Quantity)
assert q_ravel.unit == q.unit
assert np.all(q_ravel.value == q.value.ravel())
def test_flatten(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_flatten = q.flatten()
assert isinstance(q_flatten, u.Quantity)
assert q_flatten.unit == q.unit
assert np.all(q_flatten.value == q.value.flatten())
def test_transpose(self):
q = np.arange(6.).reshape(3, 2) * u.m
q_transpose = q.transpose()
assert isinstance(q_transpose, u.Quantity)
assert q_transpose.unit == q.unit
assert np.all(q_transpose.value == q.value.transpose())
def test_swapaxes(self):
q = np.arange(6.).reshape(3, 1, 2) * u.m
q_swapaxes = q.swapaxes(0, 2)
assert isinstance(q_swapaxes, u.Quantity)
assert q_swapaxes.unit == q.unit
assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2))
class TestQuantityStatsFuncs:
"""
Test statistical functions
"""
def test_mean(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.mean(q1) == 3.6 * u.m
def test_mean_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
qi2 = np.mean(q1, out=qi)
assert qi2 is qi
assert qi == 3.6 * u.m
def test_std(self):
q1 = np.array([1., 2.]) * u.m
assert np.std(q1) == 0.5 * u.m
def test_std_inplace(self):
q1 = np.array([1., 2.]) * u.m
qi = 1.5 * u.s
np.std(q1, out=qi)
assert qi == 0.5 * u.m
def test_var(self):
q1 = np.array([1., 2.]) * u.m
assert np.var(q1) == 0.25 * u.m ** 2
def test_var_inplace(self):
q1 = np.array([1., 2.]) * u.m
qi = 1.5 * u.s
np.var(q1, out=qi)
assert qi == 0.25 * u.m ** 2
def test_median(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.median(q1) == 4. * u.m
def test_median_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.median(q1, out=qi)
assert qi == 4 * u.m
def test_min(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.min(q1) == 1. * u.m
def test_min_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.min(q1, out=qi)
assert qi == 1. * u.m
def test_argmin(self):
q1 = np.array([6., 2., 4., 5., 6.]) * u.m
assert np.argmin(q1) == 1
def test_max(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.max(q1) == 6. * u.m
def test_max_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.max(q1, out=qi)
assert qi == 6. * u.m
def test_argmax(self):
q1 = np.array([5., 2., 4., 5., 6.]) * u.m
assert np.argmax(q1) == 4
def test_clip(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
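        # The bare 1500 is treated as dimensionless; km/m is a scale of 1000,
        # so it acts as a lower bound of 1.5 in km/m.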
c1 = q1.clip(1500, 5.5 * u.Mm / u.km)
assert np.all(c1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m)
def test_clip_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1)
assert np.all(q1 == np.array([1.5, 2., 4., 5., 5.5]) * u.km / u.m)
c1[0] = 10 * u.Mm/u.mm
assert np.all(c1.value == q1.value)
def test_conj(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.km / u.m
assert np.all(q1.conj() == q1)
def test_ptp(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
assert np.ptp(q1) == 5. * u.m
def test_ptp_inplace(self):
q1 = np.array([1., 2., 4., 5., 6.]) * u.m
qi = 1.5 * u.s
np.ptp(q1, out=qi)
assert qi == 5. * u.m
def test_round(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg)
assert np.all(np.round(q1, decimals=2) ==
np.round(q1.value, decimals=2) * u.kg)
assert np.all(q1.round(decimals=2) ==
q1.value.round(decimals=2) * u.kg)
def test_round_inplace(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
qi = np.zeros(3) * u.s
a = q1.round(decimals=2, out=qi)
assert a is qi
assert np.all(q1.round(decimals=2) == qi)
def test_sum(self):
q1 = np.array([1., 2., 6.]) * u.m
assert np.all(q1.sum() == 9. * u.m)
assert np.all(np.sum(q1) == 9. * u.m)
q2 = np.array([[4., 5., 9.], [1., 1., 1.]]) * u.s
assert np.all(q2.sum(0) == np.array([5., 6., 10.]) * u.s)
assert np.all(np.sum(q2, 0) == np.array([5., 6., 10.]) * u.s)
def test_sum_inplace(self):
q1 = np.array([1., 2., 6.]) * u.m
qi = 1.5 * u.s
np.sum(q1, out=qi)
assert qi == 9. * u.m
def test_cumsum(self):
q1 = np.array([1, 2, 6]) * u.m
assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m)
assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m)
q2 = np.array([4, 5, 9]) * u.s
assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s)
assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s)
def test_cumsum_inplace(self):
q1 = np.array([1, 2, 6]) * u.m
qi = np.ones(3) * u.s
np.cumsum(q1, out=qi)
assert np.all(qi == np.array([1, 3, 9]) * u.m)
q2 = q1
q1.cumsum(out=q1)
assert np.all(q2 == qi)
def test_nansum(self):
q1 = np.array([1., 2., np.nan]) * u.m
assert np.all(q1.nansum() == 3. * u.m)
assert np.all(np.nansum(q1) == 3. * u.m)
q2 = np.array([[np.nan, 5., 9.], [1., np.nan, 1.]]) * u.s
assert np.all(q2.nansum(0) == np.array([1., 5., 10.]) * u.s)
assert np.all(np.nansum(q2, 0) == np.array([1., 5., 10.]) * u.s)
def test_nansum_inplace(self):
q1 = np.array([1., 2., np.nan]) * u.m
qi = 1.5 * u.s
qout = q1.nansum(out=qi)
assert qout is qi
assert qi == np.nansum(q1.value) * q1.unit
qi2 = 1.5 * u.s
qout2 = np.nansum(q1, out=qi2)
assert qout2 is qi2
assert qi2 == np.nansum(q1.value) * q1.unit
def test_prod(self):
q1 = np.array([1, 2, 6]) * u.m
with pytest.raises(ValueError) as exc:
q1.prod()
assert 'cannot use prod' in exc.value.args[0]
with pytest.raises(ValueError) as exc:
np.prod(q1)
assert 'cannot use prod' in exc.value.args[0]
q2 = np.array([3., 4., 5.]) * u.Unit(1)
assert q2.prod() == 60. * u.Unit(1)
assert np.prod(q2) == 60. * u.Unit(1)
def test_cumprod(self):
q1 = np.array([1, 2, 6]) * u.m
with pytest.raises(ValueError) as exc:
q1.cumprod()
assert 'cannot use cumprod' in exc.value.args[0]
with pytest.raises(ValueError) as exc:
np.cumprod(q1)
assert 'cannot use cumprod' in exc.value.args[0]
q2 = np.array([3, 4, 5]) * u.Unit(1)
assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
def test_diff(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
assert np.all(q1.diff() == np.array([1., 2., 6.]) * u.m)
assert np.all(np.diff(q1) == np.array([1., 2., 6.]) * u.m)
def test_ediff1d(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
assert np.all(q1.ediff1d() == np.array([1., 2., 6.]) * u.m)
assert np.all(np.ediff1d(q1) == np.array([1., 2., 6.]) * u.m)
@pytest.mark.xfail
def test_dot_func(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
q2 = np.array([3., 4., 5., 6.]) * u.s
q3 = np.dot(q1, q2)
assert q3.value == np.dot(q1.value, q2.value)
assert q3.unit == u.m * u.s
def test_dot_meth(self):
q1 = np.array([1., 2., 4., 10.]) * u.m
q2 = np.array([3., 4., 5., 6.]) * u.s
q3 = q1.dot(q2)
assert q3.value == np.dot(q1.value, q2.value)
assert q3.unit == u.m * u.s
def test_trace_func(self):
q = np.array([[1., 2.], [3., 4.]]) * u.m
assert np.trace(q) == 5. * u.m
def test_trace_meth(self):
q1 = np.array([[1., 2.], [3., 4.]]) * u.m
assert q1.trace() == 5. * u.m
cont = u.Quantity(4., u.s)
q2 = np.array([[3., 4.], [5., 6.]]) * u.m
q2.trace(out=cont)
assert cont == 9. * u.m
def test_clip_func(self):
q = np.arange(10) * u.m
assert np.all(np.clip(q, 3 * u.m, 6 * u.m) == np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m)
def test_clip_meth(self):
expected = np.array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.]) * u.m
q1 = np.arange(10) * u.m
q3 = q1.clip(3 * u.m, 6 * u.m)
assert np.all(q1.clip(3 * u.m, 6 * u.m) == expected)
cont = np.zeros(10) * u.s
q1.clip(3 * u.m, 6 * u.m, out=cont)
assert np.all(cont == expected)
class TestArrayConversion:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
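        # The plain 1 is dimensionless, i.e. 1000 in units of m/km (scale
        # 1/1000), hence the assert below.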
assert q1.item(1) == 1000 * u.m / u.km
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
q1 = np.array([1, 2, 3]) * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1., 2., 3.], [4., 5., 6.]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.
q2[0] = 9. * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9., 9., 9.]))
q2[0, :-1] = 8000.
assert all(q2.flatten()[:3].value == np.array([8., 8., 9.]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
        # just to be sure, repeat with a dimensionful unit
q3 = u.Quantity(np.arange(10.), "m/s")
q3[5] = 100. * u.cm / u.s
assert q3[5].value == 1.
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.
assert q3[5] == 0.
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
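        # The bare 2 is dimensionless, which reads back as 2000 in m/km.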
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True,
False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, 'unit')
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km/u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
def test_byte_type_view_field_changes(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.byteswap()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.byteswap())
q2 = q1.astype(np.float64)
assert all(q2 == q1)
assert q2.dtype == np.float64
q2a = q1.getfield(np.int32, offset=0)
q2b = q1.byteswap().getfield(np.int32, offset=4)
assert q2a.unit == q1.unit
assert all(q2b.byteswap() == q2a)
def test_sort(self):
q1 = np.array([1., 5., 2., 4.]) * u.km / u.m
i = q1.argsort()
assert not hasattr(i, 'unit')
q1.sort()
i = q1.searchsorted([1500, 2500])
assert not hasattr(i, 'unit')
assert all(i == q1.to(
u.dimensionless_unscaled).value.searchsorted([1500, 2500]))
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump('a.a')
with pytest.raises(NotImplementedError):
q1.dumps()
class TestRecArray:
"""Record arrays are not specifically supported, but we should not
prevent their use unnecessarily"""
def setup(self):
self.ra = (np.array(np.arange(12.).reshape(4, 3))
.view(dtype=('f8,f8,f8')).squeeze())
def test_creation(self):
qra = u.Quantity(self.ra, u.m)
assert np.all(qra[:2].value == self.ra[:2])
def test_equality(self):
qra = u.Quantity(self.ra, u.m)
qra[1] = qra[2]
assert qra[1] == qra[2]
|
9c9c03b9194351bf5857ba8bb8fe0bd6e6c982d929443dce43588062f33f5605 | """
Test ``allclose`` and ``isclose``.
``allclose`` was ``quantity_allclose`` in ``astropy.tests.helper``.
"""
import numpy as np
import pytest
from ... import units as u
@pytest.mark.parametrize(
('a', 'b'),
[([1, 2], [1, 2]),
([1, 2] * u.m, [100, 200] * u.cm),
(1 * u.s, 1000 * u.ms)])
def test_allclose_isclose_default(a, b):
assert u.allclose(a, b)
assert np.all(u.isclose(a, b))
def test_allclose_isclose():
a = [1, 2] * u.m
b = [101, 201] * u.cm
delta = 2 * u.cm
assert u.allclose(a, b, atol=delta)
assert np.all(u.isclose(a, b, atol=delta))
c = [90, 200] * u.cm
assert not u.allclose(a, c)
assert not np.all(u.isclose(a, c))
|
4302a2702bac138e753912037042acc441c56bec6c1b34963c77c5810e562ca4 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Quantity class and related.
"""
import copy
import pickle
import decimal
from fractions import Fraction
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_almost_equal)
from ...tests.helper import catch_warnings, raises
from ...utils import isiterable, minversion
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyDeprecationWarning
from ... import units as u
from ...units.quantity import _UNIT_NOT_INITIALISED
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
MATPLOTLIB_LT_15 = LooseVersion(matplotlib.__version__) < LooseVersion("1.5")
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
q4 = u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
q1 = u.Quantity(11.412, unit="testingggg")
def test_nan_inf(self):
# Not-a-number
q = u.Quantity('nan', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('NaN', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('-nan', unit='cm') # float() allows this
assert np.isnan(q.value)
q = u.Quantity('nan cm')
assert np.isnan(q.value)
assert q.unit == u.cm
# Infinity
q = u.Quantity('inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('-inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('inf cm')
assert np.isinf(q.value)
assert q.unit == u.cm
q = u.Quantity('Infinity', unit='cm') # float() allows this
assert np.isinf(q.value)
# make sure these strings don't parse...
with pytest.raises(TypeError):
q = u.Quantity('', unit='cm')
with pytest.raises(TypeError):
q = u.Quantity('spam', unit='cm')
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve float32
a3 = np.array([1., 2.], dtype=np.float32)
q3 = u.Quantity(a3, u.yr)
assert q3.dtype == a3.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal('10.25'), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object)
assert q5.dtype == object
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.), order='C')
qcc = u.Quantity(ac, u.m, order='C')
assert qcc.flags['C_CONTIGUOUS']
qcf = u.Quantity(ac, u.m, order='F')
assert qcf.flags['F_CONTIGUOUS']
qca = u.Quantity(ac, u.m, order='A')
assert qca.flags['C_CONTIGUOUS']
# check it works also when passing in a quantity
assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS']
af = np.array(np.arange(10.), order='F')
qfc = u.Quantity(af, u.m, order='C')
assert qfc.flags['C_CONTIGUOUS']
qff = u.Quantity(ac, u.m, order='F')
assert qff.flags['F_CONTIGUOUS']
qfa = u.Quantity(af, u.m, order='A')
assert qfa.flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS']
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = 'm'
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.*a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.
assert mylookalike[2] == 0.
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.
assert mylookalike[2] == 2.
mylookalike.unit = 'nonsense'
with pytest.raises(TypeError):
u.Quantity(mylookalike)
class TestQuantityOperations:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15. * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416,
decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.
assert new_quantity.unit == u.Unit("1/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, 'm*s')
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, 'm/s')
assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m')
def test_power(self):
# raise quantity to a power
new_quantity = self.q1 ** 2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1 ** 3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = eval("q @ a")
assert np.all(result1 == q)
result2 = eval("a @ q")
assert np.all(result2 == q)
result3 = eval("q @ q")
assert np.all(result3 == a * u.m ** 2)
# less trivial case: a stack of permutation matrices.
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
result4 = eval("q @ q2")
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
""" When trying to add or subtract units that aren't compatible, throw an error """
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
new_q = q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
type_err_msg = ("Unsupported operand type(s) for ufunc add: "
"'Quantity' and 'dict'")
with pytest.raises(TypeError) as exc:
q1 + {'a': 1}
assert exc.value.args[0] == type_err_msg
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3. * u.m / u.km
dq1 = dq + 1. * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
# check that mixing a dimensional quantity with a dimensionless one
# raises UnitsError
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if the scaling happens to yield exact integers (km/m scales by 1000)
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
def test_complicated_operation(self):
""" Perform a more complicated test """
from .. import imperial
# Multiple units
distance = u.Quantity(15., u.meter)
time = u.Quantity(11., u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(
velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2)
new_q = ((1. / (4. * np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg))
# Area
side1 = u.Quantity(11., u.centimeter)
side2 = u.Quantity(7., u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77., decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
# when the other operand is a unit, Quantity.__eq__ returns NotImplemented,
# so Python falls back to the reflected UnitBase.__eq__, which can handle
# the comparison; hence q == unit still works
unit = u.cm**3
q = 1. * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000. * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1. * u.cm == 1.
assert 1. * u.cm != 1.
# evaluating the truth value should raise a deprecation warning
for quantity in (1. * u.cm, 1. * u.dimensionless_unscaled):
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
bool(quantity)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == 'The truth value of '
'a Quantity is ambiguous. In the future this will '
'raise a ValueError.')
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = ("only dimensionless scalar quantities "
"can be converted to Python scalars")
index_err_msg = ("only integer dimensionless scalar quantities "
"can be converted to a Python index")
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
# We used to test `q1 * ['a', 'b', 'c']` here, but that it worked
# at all was a really odd confluence of bugs. Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ['a', 'b', 'c'] == ['a', 'b', 'c', 'a', 'b', 'c']
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1., 2., 3.], u.m)
assert np.all(np.array(q) == np.array([1., 2., 3.]))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_value_views():
q1 = u.Quantity([1., 2.], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.
assert np.all(q1 == [0., 2.] * u.meter)
v2 = q1.to_value()
v2[1] = 3.
assert np.all(q1 == [0., 3.] * u.meter)
v3 = q1.to_value('m')
v3[0] = 1.
assert np.all(q1 == [1., 3.] * u.meter)
v4 = q1.to_value('cm')
v4[0] = 0.
# copy if different unit.
assert np.all(q1 == [1., 3.] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
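# Because _equivalencies is set on the subclass, .to() picks them up
# automatically, so the Hz <-> nm conversions below need no explicit
# equivalencies argument.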
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0*u.radian)
assert u.deg.is_equivalent(1*u.radian)
def test_si():
q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10. * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10. / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
q = 10. * u.m # 10 meters
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10. / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10. * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
class TestQuantityComparison:
def test_quantity_equality(self):
assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km')
assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km'))
# == and != return False and True, respectively, if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(
1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
scalarintq = u.Quantity(1, unit='m', dtype=int)
scalarfloatq = u.Quantity(1.3, unit='m')
arrq = u.Quantity([1, 2.3, 8.9], unit='m')
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
if NUMPY_LT_1_14:
assert repr(self.scalarintq * q2) == "<Quantity 1.0>"
assert repr(self.arrq * q2) == "<Quantity [ 1. , 2.3, 8.9]>"
else:
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
if NUMPY_LT_1_14:
assert str(self.arrq * q2) == "[ 1. 2.3 8.9]"
else:
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, '.2f') == '3.14'
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
if NUMPY_LT_1_14:
assert str(self.arrq) == "[ 1. 2.3 8.9] m"
else:
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
if NUMPY_LT_1_14:
assert repr(self.arrq) == "<Quantity [ 1. , 2.3, 8.9] m>"
else:
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, '02d') == "01 m"
assert format(self.scalarfloatq, '.1f') == "1.3 m"
assert format(self.scalarfloatq, '.0f') == "1 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>')
def test_repr_latex(self):
from ...units.quantity import conf
q2scalar = u.Quantity(1.5e14, 'm/s')
assert self.scalarintq._repr_latex_() == r'$1 \; \mathrm{m}$'
assert self.scalarfloatq._repr_latex_() == r'$1.3 \; \mathrm{m}$'
assert (q2scalar._repr_latex_() ==
r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$')
assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \; \mathrm{m}$'
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r'$(1+2i) \; \mathrm{}$'
assert (self.scalar_big_complex_q._repr_latex_() ==
r'$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$')
assert (self.scalar_big_neg_complex_q._repr_latex_() ==
r'$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$')
assert (self.arr_complex_q._repr_latex_() ==
(r'$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),'
r'~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$'))
assert r'\dots' in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100)*u.m
qbig = np.arange(1000)*u.m
qvbig = np.arange(10000)*1e9*u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, 'm/s')
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert q._repr_latex_() == r'$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$'
np.set_printoptions(precision=2)
assert q._repr_latex_() == r'$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$'
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' not in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r'\dots' in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r'$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$'
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
referenced rather than copied, then modified in place, which changed the
original value.
"""
q = np.array([1, 2, 3]) * u.m / (2. * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10., u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, int)
a = np.array([(1., 2., 3.), (4., 5., 6.), (7., 8., 9.)],
dtype=[('x', float),
('y', float),
('z', float)])
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc['x']
assert np.all(qkpcx.value == a['x'])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc['x'][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]['x']
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
# quantity addition/subtraction should *not* work with bare arrays,
# because the unit of the other operand is ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1., 2., 3.]) * u.m
assert q[0] == 1. * u.m
assert np.all(q[0:2] == u.Quantity([1., 2.], u.m))
def test_array_setslice():
q = np.array([1., 2., 3.]) * u.m
q[1:2] = np.array([400.]) * u.cm
assert np.all(q == np.array([1., 4., 3.]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4., u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1. * u.m / 's'
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
@raises(ValueError)
def test_quantity_invalid_unit_string():
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert 'centimeter' in attrs
assert 'cm' in attrs
assert 'parsec' in attrs
assert 'foo' in attrs
assert 'to' in attrs
assert 'value' in attrs
# Something from the base class, object
assert '__setattr__' in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
"""Regressiont est for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order='F')
assert q3.flags['F_CONTIGUOUS']
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order='C')
assert q4.flags['C_CONTIGUOUS']
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10. * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity('1')
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.
q = u.Quantity('1.5 m/s')
assert q.unit == u.m/u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit('1.5 m/s')
q = u.Quantity('.5 m')
assert q == u.Quantity(0.5, u.m)
q = u.Quantity('-1e1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('-1e+1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('+.5km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('+5e-1km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('5', u.m)
assert q == u.Quantity(5., u.m)
q = u.Quantity('5 km', u.m)
assert q.value == 5000.
assert q.unit == u.m
q = u.Quantity('5Em')
assert q == u.Quantity(5., u.Em)
with pytest.raises(TypeError):
u.Quantity('')
with pytest.raises(TypeError):
u.Quantity('m')
with pytest.raises(TypeError):
u.Quantity('1.2.3 deg')
with pytest.raises(TypeError):
u.Quantity('1+deg')
with pytest.raises(TypeError):
u.Quantity('1-2deg')
with pytest.raises(TypeError):
u.Quantity('1.2e-13.3m')
with pytest.raises(TypeError):
u.Quantity(['5'])
with pytest.raises(TypeError):
u.Quantity(np.array(['5']))
with pytest.raises(ValueError):
u.Quantity('5E')
with pytest.raises(ValueError):
u.Quantity('5 foo')
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
q2 = np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
@raises(ValueError)
def test_quantity_tuple_power():
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.
assert q.unit == u.m
# Regression check to ensure we didn't create an object type by raising
# the value of the quantity to a Fraction. [#3922]
assert q.dtype.kind == 'f'
def test_inherit_docstrings():
assert u.Quantity.argmax.__doc__ == np.ndarray.argmax.__doc__
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from ...table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b'])
t['a'].unit = u.kpc
qa = u.Quantity(t['a'])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t['a'])
qb = u.Quantity(t['b'])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t['b'])
# Passing an explicit unit does *not* simply relabel the data; standard
# `Quantity` behavior applies, so the kpc column is converted to pc
qap = u.Quantity(t['a'], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t['a'] * 1000)
qbp = u.Quantity(t['b'], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t['b'])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from ...table import Table, Column
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that we do not fail the original problem.
t = Table()
t['x'] = np.arange(10) * u.mm
t['y'] = np.ones(10) * u.mm
assert type(t['x']) is Column
xy = np.vstack([t['x'], t['y']]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t['x'][ii].unit
# should not raise anything
xy[ii, 0] = t['x'][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == 'f'
if minversion(np, '1.8.0'):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2],
[10, 20],
[3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 10, 4]])
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
if NUMPY_LT_1_14:
assert repr(a) == 'array([<Quantity 1.0 m>, <Quantity 2.0 s>], dtype=object)'
assert str(a) == '[<Quantity 1.0 m> <Quantity 2.0 s>]'
else:
assert repr(a) == 'array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)'
assert str(a) == '[<Quantity 1. m> <Quantity 2. s>]'
class TestSpecificTypeQuantity:
def setup(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.)*u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.))
l2 = self.Length2(np.arange(5.))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.))
def test_view(self):
l = (np.arange(5.) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.) * u.s).view(self.Length)
v = np.arange(5.).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.)*u.cm)
sum1 = l + 1.*u.m
assert type(sum1) is self.Length
sum2 = 1.*u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.*u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
@pytest.mark.skipif('not HAS_MATPLOTLIB')
@pytest.mark.xfail('MATPLOTLIB_LT_15')
class TestQuantityMatplotlib:
"""Test if passing matplotlib quantities works.
TODO: create PNG output and check against reference image
once `astropy.wcsaxes` is merged, which provides
the machinery for this.
See https://github.com/astropy/astropy/issues/1881
See https://github.com/astropy/astropy/pull/2139
"""
def test_plot(self):
data = u.Quantity([4, 5, 6], 's')
plt.plot(data)
def test_scatter(self):
x = u.Quantity([4, 5, 6], 'second')
y = [1, 3, 4] * u.m
plt.scatter(x, y)
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1., my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1., my_unit, subok=True)
assert type(q2) is MyQuantity
|
356cef4cba0422d7ad68cb562b512f722fe35a1f732549d2a2480d4b61142990 | # The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import warnings
from collections import namedtuple
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ... import units as u
from .. import quantity_helper as qh
from ..._erfa import ufunc as erfa_ufunc
from ...tests.helper import raises
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
testcase = namedtuple('testcase', ['f', 'q_in', 'q_out'])
testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg'])
testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter'])
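# The helpers below are marked 'skip' so that pytest does not collect them as
# stand-alone tests; the parametrized tests further down call them directly,
# once per testcase/testexc/testwarn tuple.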
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful with the following line: it would break for a function that
# returns a single tuple (as opposed to a tuple of return values)
results = (results, ) if type(results) != tuple else results
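# e.g., np.modf returns a 2-tuple of quantities while np.sin returns a
# single Quantity; wrapping the latter in a 1-tuple lets the zip below
# handle both uniformly.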
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.E-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncCoverage:
"""Test that we cover all ufunc's"""
def test_coverage(self):
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)])
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy, erfa, and scipy.
# (Since coverage for scipy and erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = set([ufunc for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)])
if HAS_SCIPY:
import scipy.special as sps
all_sps_ufuncs = set([ufunc for ufunc in sps.__dict__.values()
if isinstance(ufunc, np.ufunc)])
else:
all_sps_ufuncs = set()
assert (all_q_ufuncs - all_np_ufuncs -
all_sps_ufuncs - all_erfa_ufuncs == set())
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize('tc', (
testcase(
f=np.sin,
q_in=(30. * u.degree, ),
q_out=(0.5*u.dimensionless_unscaled, )
),
testcase(
f=np.sin,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(30. * u.degree), ),
q_out=(np.radians(30.) * u.radian, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, )
),
testcase(
f=np.cos,
q_in=(np.pi / 3. * u.radian, ),
q_out=(0.5 * u.dimensionless_unscaled, )
),
testcase(
f=np.cos,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
),
testcase(
f=np.tan,
q_in=(np.pi / 3. * u.radian, ),
q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
),
testcase(
f=np.tan,
q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
),
testcase(
f=np.arctan2,
q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
q_out=(np.arctan2(np.array([10., 30., 70., 80.]),
2000.) * u.radian, )
),
testcase(
f=np.arctan2,
q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.radians,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.radians,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.rad2deg,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.rad2deg,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
)
))
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize('te', (
testexc(
f=np.deg2rad,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.radians,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.rad2deg,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.degrees,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.sin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units"
),
testexc(
f=np.arcsin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities"
),
testexc(
f=np.cos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units"
),
testexc(
f=np.arccos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities"
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units"
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
exc=u.UnitsError,
msg="compatible dimensions"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.),
exc=u.UnitsError,
msg="dimensionless quantities when other arg"
)
))
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize('tw', (
testwarn(
f=np.arcsin,
q_in=(27. * u.pc / (15 * u.kpc), ),
wfilter='error'
),
))
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
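# the quotient counts how many whole divisors fit, so it is dimensionless;
# the remainder is what is left over and keeps the dividend's unit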
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_scalar(self, function):
assert function(8. * u.m**3) == 2. * u.m
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
# does not exactly equal 4. See #4388.
values = np.array([1., 8., 64.])
assert np.all(function(values * u.m**3) ==
function(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
# float_power only introduced in numpy 1.12
@pytest.mark.skipif("not hasattr(np, 'float_power')")
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
@raises(ValueError)
def test_power_array_array(self):
np.power(4. * u.m, [2., 4.])
@raises(ValueError)
def test_power_array_array2(self):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s,
np.array([-2., 2., -4.]) * u.m) ==
np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
# Can't use frexp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs:
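# "Invariant" here means the result carries the same unit as the (first)
# quantity input, e.g., np.floor(3.7 * u.m) == 3. * u.m.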
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil, np.positive])
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_one_arbitrary(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
arbitrary_unit_value = np.array([0.])
q_o = ufunc(q_i1, arbitrary_unit_value)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs:
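# Comparison ufuncs convert the second argument to the unit of the first
# and return plain boolean arrays rather than quantities.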
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestInplaceUfuncs:
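# In-place operations reuse the memory of the 'out' quantity; where units
# require it, the values are rescaled as part of the operation.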
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# And now, with numpy >= 1.13, one can also replace input with
# first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
The first two checks ensure that float32 is kept (closes #3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.*u.km)
np.add.at(check, i, 1000.)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.*u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1*u.s)
# but be fine if it does not
s = np.arange(10.) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.) * u.m
np.multiply.at(s, i, 2.)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.*u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.) * u.m
s2 = np.arange(2.) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
from scipy import special as sps
class TestScipySpecialUfuncs:
erf_like_ufuncs = (
sps.erf, sps.gamma, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize('function', (sps.radian, ))
def test_radian(self, function):
q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec)
assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0. * u.degree, 0. * u.arcmin, 30. * u.arcsec)
assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3. * u.m, 2. * u.s, 1. * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_array(self, function):
q = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(
np.ones(3),
np.array([1. / 3., 1. / 2., 1.]))
)
# should also work on quantities that can be made dimensionless
q2 = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.ones(3),
np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(1. * u.kg, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
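# Illustrative sketch, not part of the original test suite: a by-hand check
# of the behaviour exercised above. Units propagate through ufuncs, and
# ``out=`` writes into the target Quantity's memory, adjusting its unit.
def _example_inplace_ufunc():  # pragma: no cover - demonstration only
    s = np.arange(3.) * u.m
    np.multiply(s, 2., out=s)  # in-place scaling keeps the unit
    assert np.all(s.value == [0., 2., 4.]) and s.unit is u.m
    out = np.zeros(2) * u.dimensionless_unscaled
    np.sin(np.array([0., np.pi / 2.]) * u.radian, out=out)
    assert out.unit is u.dimensionless_unscaled  # sine is dimensionless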
|
5ef4da387f275c281837c2fd2bfea627fe815b06dfe67d6f0b080e68a9fdb6e3 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from ... import units as u
# list of pairs (target unit/physical type, input unit)
x_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s),
([u.arcsec, u.km], u.deg), ([u.arcsec, u.km], u.km), # multiple allowed
(['angle', 'length'], u.deg), (['angle', 'length'], u.km)]
y_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s)]
@pytest.fixture(scope="module",
params=list(range(len(x_inputs))))
def x_input(request):
return x_inputs[request.param]
@pytest.fixture(scope="module",
params=list(range(len(y_inputs))))
def y_input(request):
return y_inputs[request.param]
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 1*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_args_nonquantity(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 100)
assert isinstance(x, u.Quantity)
assert isinstance(y, int)
assert x.unit == x_unit
def test_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(u.UnitsError) as e:
        x, y = myfunc_args(1*x_unit, 100*u.Joule)  # unit not convertible to any target
str_to = str(y_target)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)
def test_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg, y=1*y_unit):
return x, my_arg, y
x, my_arg, y = myfunc_args(1*x_unit, 100, y=100*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg, int)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
def test_unused_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
return x, my_arg1, y, my_arg2
x, my_arg1, y, my_arg2 = myfunc_args(1*x_unit, 100,
y=100*y_unit, my_arg2=10)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg1, int)
assert isinstance(y, u.Quantity)
assert isinstance(my_arg2, int)
assert y.unit == y_unit
assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, y=100*u.Joule)
str_to = str(y_target)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)
def test_kwarg_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, y=100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_kwarg_default(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_input(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x=1*x_unit, y=1*y_unit):
return x, y
kwargs = {'x': 10*x_unit, 'y': 10*y_unit}
x, y = myfunc_args(**kwargs)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_extra(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, **kwargs):
return x
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
# ---- Tests that don't use the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies(x_unit, y_unit):
@u.quantity_input(x=x_unit, y=y_unit,
equivalencies=u.mass_energy())
def myfunc_args(x, y):
return x, y+(10*u.J) # Add an energy to check equiv is working
x, y = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == u.arcsec
assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies(x_unit, energy_unit):
@u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
def myfunc_args(x, energy=10*u.eV):
return x, energy+(10*u.J) # Add an energy to check equiv is working
x, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(energy, u.Quantity)
assert x.unit == u.arcsec
assert energy.unit == u.gram
def test_no_equivalent():
class test_unit:
pass
class test_quantity:
unit = test_unit()
@u.quantity_input(x=u.arcsec)
def myfunc_args(x):
return x
with pytest.raises(TypeError) as e:
x, y = myfunc_args(test_quantity())
assert str(e.value) == "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an 'is_equivalent' method. You may want to pass in an astropy Quantity instead."
def test_kwarg_invalid_physical_type():
@u.quantity_input(x='angle', y='africanswallow')
def myfunc_args(x, y=10*u.deg):
return x, y
with pytest.raises(ValueError) as e:
x, y = myfunc_args(1*u.arcsec, y=100*u.deg)
assert str(e.value) == "Invalid unit or physical type 'africanswallow'."
def test_default_value_check():
x_target = u.deg
x_unit = u.arcsec
with pytest.raises(TypeError):
@u.quantity_input(x=x_target)
def myfunc_args(x=1.):
return x
x = myfunc_args()
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
def test_args_None():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
y_unit = u.kpc
@u.quantity_input(x=[x_target, None], y=[None, y_target])
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(None, 1*y_unit)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
assert x is None
def test_args_None_kwarg():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=None):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
with pytest.raises(TypeError):
x, y = myfunc_args(None, None)
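# Illustrative sketch, not one of the tests above: typical use of the
# decorator exercised in this module. A target may be a unit or a physical
# type string; incompatible input raises UnitsError, a bare number TypeError.
def _example_quantity_input():  # pragma: no cover - demonstration only
    @u.quantity_input(distance=u.km, t='time')
    def speed(distance, t):
        return distance / t
    v = speed(1 * u.km, 30 * u.s)
    assert v.unit.is_equivalent(u.km / u.s)
    return v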
|
131e41fb57bac0e6f60a7bc08fdd3508bd96bf511e205cb11f97ae182c0a4bfb | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from .. import (Unit, UnitBase, UnitsError, UnitTypeError,
dimensionless_unscaled, Quantity)
__all__ = ['FunctionUnitBase', 'FunctionQuantity']
SUPPORTED_UFUNCS = set(getattr(np.core.umath, ufunc) for ufunc in (
'isfinite', 'isinf', 'isnan', 'sign', 'signbit',
'rint', 'floor', 'ceil', 'trunc', 'power',
'_ones_like', 'ones_like', 'positive') if hasattr(np.core.umath, ufunc))
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = set(getattr(np, function) for function in
('clip', 'trace', 'mean', 'min', 'max', 'round'))
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
        `~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
self._physical_unit = dimensionless_unscaled
else:
self._physical_unit = Unit(physical_unit)
if (not isinstance(self._physical_unit, UnitBase) or
self._physical_unit.is_equivalent(
self._default_function_unit)):
raise ValueError("Unit {0} is not a physical unit."
.format(self._physical_unit))
if function_unit is None:
self._function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, 'function_unit',
function_unit))
if function_unit.is_equivalent(self._default_function_unit):
self._function_unit = function_unit
else:
raise ValueError("Cannot initialize '{0}' instance with "
"function unit '{1}', as it is not "
"equivalent to default function unit '{2}'."
.format(self.__class__.__name__,
function_unit,
self._default_function_unit))
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit,
self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : unit object or string or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other_physical_unit = getattr(other, 'physical_unit', (
dimensionless_unscaled if self.function_unit.is_equivalent(other)
else other))
return self.physical_unit.is_equivalent(other_physical_unit,
equivalencies)
def to(self, other, value=1., equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit` object, `~astropy.units.function.FunctionUnitBase` object or string
The unit to convert to.
value : scalar int or float, or sequence convertible to array, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
            This list is meant to treat only equivalencies between different
            physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent.
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, 'function_unit', other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(other, 'physical_unit',
dimensionless_unscaled)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value),
equivalencies)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
# when other is not a function unit
return self.physical_unit.to(other, self.to_physical(value),
equivalencies)
def is_unity(self):
return False
def __eq__(self, other):
return (self.physical_unit == getattr(other, 'physical_unit',
dimensionless_unscaled) and
self.function_unit == getattr(other, 'function_unit', other))
def __ne__(self, other):
return not self.__eq__(other)
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError("Cannot multiply a function unit "
"with a physical dimension with any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension by any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1./other, unit=self)
except Exception:
return NotImplemented
def __rdiv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension into any unit")
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit ** power
raise UnitsError("Cannot raise a function unit "
"with a physical dimension to any power but 0 or 1.")
def __pos__(self):
return self._copy()
def to_string(self, format='generic'):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
        unit, as in "dB(mW)", with both units set using the given format.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ('generic', 'unscaled', 'latex'):
raise ValueError("Function units cannot be written in {0} format. "
"Only 'generic', 'unscaled' and 'latex' are "
"supported.".format(format))
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == '':
pu_str = '1'
if format == 'latex':
self_str += r'$\mathrm{{\left( {0} \right)}}$'.format(
pu_str[1:-1]) # need to strip leading and trailing "$"
else:
self_str += '({0})'.format(pu_str)
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += '({0})'.format(pu_str)
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return 'Unit("{0}")'.format(self.to_string())
else:
return '{0}("{1}"{2})'.format(
self.__class__.__name__, self.physical_unit,
"" if self.function_unit is self._default_function_unit
else ', unit="{0}"'.format(self.function_unit))
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string('latex')
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
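# Illustrative sketch, not part of the original module: concrete function
# units built on FunctionUnitBase are exposed through the top-level units
# API, e.g. ``u.dB`` and ``u.mag``. The import is local to avoid a circular
# import at module load time.
def _example_function_unit():  # pragma: no cover - demonstration only
    import numpy as np
    import astropy.units as u
    dBm = u.dB(u.mW)  # function unit wrapping the physical unit mW
    level = 20. * dBm  # a logarithmic quantity, "20 dBm"
    # to_physical is applied under the hood: 20 dBm -> 100 mW
    assert np.isclose(level.physical.to_value(u.mW), 100.)
    # and from_physical for the reverse conversion
    assert np.isclose((100. * u.mW).to(dBm).value, 20.)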
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, sequence of convertible items, `~astropy.units.Quantity`, or `~astropy.units.function.FunctionQuantity`
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
        If a `~astropy.units.Quantity` with just a physical unit, it will be
converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : string, `~astropy.units.UnitBase` or `~astropy.units.function.FunctionUnitBase` instance, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.function.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.function.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = getattr(value[0], 'unit')
except Exception:
pass
physical_unit = getattr(value_unit, 'physical_unit', value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(cls, value, unit, dtype=dtype, copy=copy,
order=order, subok=subok, ndmin=ndmin)
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behaviour
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new `FunctionQuantity` with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behaviour
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or 'nonsense')
except Exception:
raise UnitTypeError(
"{0} instances require {1} function units"
.format(type(self).__name__, self._unit_class.__name__) +
", so cannot set it to '{0}'.".format(unit))
self._unit = unit
# ↓↓↓ methods overridden to change behaviour
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view * other
raise UnitTypeError("Cannot multiply function quantities which "
"are not dimensionless with anything.")
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view / other
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless by anything.")
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view.__rdiv__(other)
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless into anything.")
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
        other cannot be converted to self because it has a different physical
unit, and returning NotImplemented when there are other errors."""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, 'unit') and
hasattr(arg.unit, 'physical_unit'))):
args = tuple(getattr(arg, '_function_view', arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError("Cannot use method that uses function '{0}' with "
"function quantities that are not dimensionless."
.format(function.__name__))
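# Illustrative sketch, not part of the original module: _wrap_function seen
# through the public API. ``mean`` is in SUPPORTED_FUNCTIONS, so it works for
# any FunctionQuantity; multiplication only works if the physical unit is
# dimensionless.
def _example_function_quantity():  # pragma: no cover - demonstration only
    import numpy as np
    import astropy.units as u
    m = np.array([19., 20., 21.]) * u.mag(u.ct)  # magnitudes of counts
    assert np.isclose(m.mean().value, 20.)  # supported method
    try:
        m * 2.  # physical unit (ct) is not dimensionless
    except u.UnitTypeError:
        pass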
|
30873f4b7a3d556a6975f81b4d375a192bc35801166c254a2546ae877ecbb425 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames actually implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric
from .lsr import LSR, GalacticLSR
from .supergalactic import Supergalactic
from .altaz import AltAz
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .ecliptic import (GeocentricTrueEcliptic, BarycentricTrueEcliptic,
HeliocentricTrueEcliptic, BaseEclipticFrame)
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
from ..baseframe import frame_transform_graph
# we define an __all__ because otherwise the transformation modules
# get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'PrecessedGeocentric', 'GeocentricTrueEcliptic',
'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR',
'BaseEclipticFrame', 'BaseRADecFrame', 'make_transform_graph_docs']
def make_transform_graph_docs(transform_graph):
"""
Generates a string that can be used in other docstrings to include a
transformation graph, showing the available transforms and
coordinate systems.
Parameters
----------
transform_graph : `~.coordinates.TransformGraph`
Returns
-------
docstring : str
A string that can be added to the end of a docstring to show the
transform graph.
"""
from textwrap import dedent
coosys = [transform_graph.lookup_name(item) for
item in transform_graph.get_names()]
# currently, all of the priorities are set to 1, so we don't need to show
    # them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the coordinate systems built into the
`~astropy.coordinates` package, their aliases (useful for converting
other coordinates to them using attribute-style access) and the
pre-defined transformations between them. The user is free to
override any of these transformations by defining new transformations
between these systems, but the pre-defined transformations should be
sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
    .. Wrap the graph in a div with a custom class to allow theming.
.. container:: frametransformgraph
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from ..transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = u"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{0}:</b>
<span style="font-size: 24px; color: {1};"><b>➝</b></span>
</p>
</li>
""".format(cls.__name__, color)
html_list_items.append(block)
graph_legend = u"""
.. raw:: html
<ul>
{}
</ul>
""".format("\n".join(html_list_items))
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
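# Illustrative sketch, not part of the original module: the generated reST
# is meant to be appended to other docstrings; a quick sanity check is
def _example_transform_graph_docs():  # pragma: no cover - demonstration only
    doc = make_transform_graph_docs(frame_transform_graph)
    assert '.. graphviz::' in doc
    return doc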
|
b762e36222d3fa5844c841a57ea53bccaf341684946d609ec3fccc9a572e2d03 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
import warnings
import numpy as np
from ... import units as u
from ... import _erfa as erfa
from ...time import Time
from ...utils import iers
from ...utils.exceptions import AstropyWarning
# The UTC time scale is not properly defined prior to 1960, so Time('B1950',
# scale='utc') will emit a warning. Instead, we use Time('B1950', scale='tai')
# which is equivalent, but does not emit a warning.
EQUINOX_J2000 = Time('J2000', scale='utc')
EQUINOX_B1950 = Time('B1950', scale='tai')
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time('J2000', scale='utc')
PIOVER2 = np.pi / 2.
# comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29)*u.arcsec
def get_polar_motion(time):
"""
    Gets the two polar motion components in radians for use with apio13.
"""
# Get the polar motion from the IERS table
xp, yp, status = iers.IERS_Auto.open().pm_xy(time, return_status=True)
wmsg = None
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
wmsg = ('Tried to get polar motions for times before IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those. '
'This may affect precision at the 10s of arcsec level')
xp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
wmsg = ('Tried to get polar motions for times after IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those. '
'This may affect precision at the 10s of arcsec level')
xp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
return xp.to_value(u.radian), yp.to_value(u.radian)
def _warn_iers(ierserr):
"""
    Generate a warning for an IERSRangeError.
Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
"""
msg = '{0} Assuming UT1-UTC=0 for coordinate transformations.'
warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
def get_dut1utc(time):
"""
    This function is used to get UT1-UTC in coordinates, because normally
    it gives an error outside the IERS range, but in coordinates we want
    to let it pass through with just a warning.
"""
try:
return time.delta_ut1_utc
except iers.IERSRangeError as e:
_warn_iers(e)
return np.zeros(time.shape)
def get_jd12(time, scale):
"""
Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
Parameters
----------
time : `~astropy.time.Time`
The time to get the jds for
scale : str
The time scale to get the jds for
Returns
-------
jd1 : float
jd2 : float
"""
if time.scale == scale:
newtime = time
else:
try:
newtime = getattr(time, scale)
except iers.IERSRangeError as e:
_warn_iers(e)
newtime = time
return newtime.jd1, newtime.jd2
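# Quick illustrative check of get_jd12, not part of the original module:
# the two-part Julian date it returns sums to the ordinary JD in the
# requested scale.
def _example_get_jd12():  # pragma: no cover - demonstration only
    t = Time('J2010', scale='utc')
    jd1, jd2 = get_jd12(t, 'tt')
    assert np.isclose(jd1 + jd2, t.tt.jd)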
def norm(p):
"""
Normalise a p-vector.
"""
if np.__version__ == '1.14.0':
# there is a bug in numpy v1.14.0 (fixed in 1.14.1) that causes
# this einsum call to break with the default of optimize=True
# see https://github.com/astropy/astropy/issues/7051
return p / np.sqrt(np.einsum('...i,...i', p, p, optimize=False))[..., np.newaxis]
else:
return p / np.sqrt(np.einsum('...i,...i', p, p))[..., np.newaxis]
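# Quick illustrative check of norm, not part of the original module: each
# p-vector along the last axis is rescaled to unit length.
def _example_norm():  # pragma: no cover - demonstration only
    p = np.array([[3., 4., 0.], [0., 0., 2.]])
    assert np.allclose(norm(p), [[0.6, 0.8, 0.], [0., 0., 1.]])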
def get_cip(jd1, jd2):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Parameters
----------
jd1 : float or `np.ndarray`
First part of two part Julian date (TDB)
jd2 : float or `np.ndarray`
Second part of two part Julian date (TDB)
Returns
    -------
x : float or `np.ndarray`
x coordinate of the CIP
y : float or `np.ndarray`
y coordinate of the CIP
s : float or `np.ndarray`
CIO locator, s
"""
# classical NPB matrix, IAU 2006/2000A
rpnb = erfa.pnm06a(jd1, jd2)
# CIP X, Y coordinates from array
x, y = erfa.bpn2xy(rpnb)
# CIO locator, s
s = erfa.s06(jd1, jd2, x, y)
return x, y, s
def aticq(ri, di, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
Parameters
----------
ri : float or `~numpy.ndarray`
right ascension, radians
di : float or `~numpy.ndarray`
declination, radians
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
    -------
rc : float or `~numpy.ndarray`
dc : float or `~numpy.ndarray`
"""
# RA, Dec to cartesian unit vectors
pos = erfa.s2c(ri, di)
# Bias-precession-nutation, giving GCRS proper direction.
ppr = erfa.trxp(astrom['bpn'], pos)
# Aberration, giving GCRS natural direction
d = np.zeros_like(ppr)
for j in range(2):
before = norm(ppr-d)
after = erfa.ab(before, astrom['v'], astrom['em'], astrom['bm1'])
d = after - before
pnat = norm(ppr-d)
# Light deflection by the Sun, giving BCRS coordinate direction
d = np.zeros_like(pnat)
for j in range(5):
before = norm(pnat-d)
after = erfa.ld(1.0, before, before, astrom['eh'], astrom['em'], 5e-8)
d = after - before
pco = norm(pnat-d)
# ICRS astrometric RA, Dec
rc, dc = erfa.c2s(pco)
return erfa.anp(rc), dc
def atciqz(rc, dc, astrom):
"""
A slightly modified version of the ERFA function ``eraAtciqz``.
``eraAtciqz`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
The companion function ``eraAticq`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
Parameters
----------
rc : float or `~numpy.ndarray`
right ascension, radians
dc : float or `~numpy.ndarray`
declination, radians
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
    -------
ri : float or `~numpy.ndarray`
di : float or `~numpy.ndarray`
"""
# BCRS coordinate direction (unit vector).
pco = erfa.s2c(rc, dc)
# Light deflection by the Sun, giving BCRS natural direction.
pnat = erfa.ld(1.0, pco, pco, astrom['eh'], astrom['em'], 5e-8)
# Aberration, giving GCRS proper direction.
ppr = erfa.ab(pnat, astrom['v'], astrom['em'], astrom['bm1'])
# Bias-precession-nutation, giving CIRS proper direction.
# Has no effect if matrix is identity matrix, in which case gives GCRS ppr.
pi = erfa.rxp(astrom['bpn'], ppr)
# CIRS (GCRS) RA, Dec
ri, di = erfa.c2s(pi)
return erfa.anp(ri), di
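# Illustrative round-trip sketch, not part of the original module: away from
# the solar limb, atciqz and aticq invert each other to high accuracy. The
# caller supplies an ``astrom`` context, e.g. from erfa.apci as set up in
# icrs_cirs_transforms.py.
def _example_aticq_atciqz_roundtrip(astrom):  # pragma: no cover
    rc, dc = 1.0, 0.5  # some ICRS direction, in radians
    ri, di = atciqz(rc, dc, astrom)
    rc2, dc2 = aticq(ri, di, astrom)
    assert np.allclose([rc2, dc2], [rc, dc])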
def prepare_earth_position_vel(time):
"""
    Get barycentric position and velocity, and heliocentric position of Earth.
    Parameters
    ----------
time : `~astropy.time.Time`
time at which to calculate position and velocity of Earth
Returns
    -------
earth_pv : `np.ndarray`
Barycentric position and velocity of Earth, in au and au/day
earth_helio : `np.ndarray`
Heliocentric position of Earth in au
"""
# this goes here to avoid circular import errors
from ..solar_system import (get_body_barycentric, get_body_barycentric_posvel)
# get barycentric position and velocity of earth
earth_p, earth_v = get_body_barycentric_posvel('earth', time)
# get heliocentric position of earth, preparing it for passing to erfa.
sun = get_body_barycentric('sun', time)
earth_heliocentric = (earth_p -
sun).get_xyz(xyz_axis=-1).to_value(u.au)
# Also prepare earth_pv for passing to erfa, which wants it as
# a structured dtype.
earth_pv = erfa.pav2pv(
earth_p.get_xyz(xyz_axis=-1).to_value(u.au),
earth_v.get_xyz(xyz_axis=-1).to_value(u.au/u.d))
return earth_pv, earth_heliocentric
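# Quick illustrative check of prepare_earth_position_vel, not part of the
# original module: the pv array has structured 'p'/'v' fields in au and
# au/day, and Earth's barycentric distance is close to 1 au.
def _example_prepare_earth():  # pragma: no cover - demonstration only
    earth_pv, earth_helio = prepare_earth_position_vel(Time('J2015'))
    assert np.isclose(np.sqrt((earth_pv['p'] ** 2).sum()), 1., atol=0.05)
    assert earth_helio.shape[-1] == 3  # xyz on the last axis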
|
ce752ed44a7fbe64b64a315e81bd350cbe1ecf7c695ce06216c4b084d7a7b338 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting from ICRS/HCRS to CIRS and
anything in between (currently that means GCRS)
"""
import numpy as np
from ... import units as u
from ..baseframe import frame_transform_graph
from ..transformations import FunctionTransformWithFiniteDifference, AffineTransform
from ..representation import (SphericalRepresentation, CartesianRepresentation,
UnitSphericalRepresentation)
from ... import _erfa as erfa
from .icrs import ICRS
from .gcrs import GCRS
from .cirs import CIRS
from .hcrs import HCRS
from .utils import get_jd12, aticq, atciqz, get_cip, prepare_earth_position_vel
# First the ICRS/CIRS related transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)
def icrs_to_cirs(icrs_coo, cirs_frame):
# first set up the astrometry context for ICRS<->CIRS
jd1, jd2 = get_jd12(cirs_frame.obstime, 'tdb')
x, y, s = get_cip(jd1, jd2)
earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_frame.obstime)
astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s)
if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just do the infinite-distance/no parallax calculation
usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)
i_ra = usrepr.lon.to_value(u.radian)
i_dec = usrepr.lat.to_value(u.radian)
cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)
newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, we first offset for parallax to get the
# astrometric coordinate direction and *then* run the ERFA transform for
# no parallax/PM. This ensures reversibility and is more sensible for
        # objects inside the solar system
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
i_ra = srepr.lon.to_value(u.radian)
i_dec = srepr.lat.to_value(u.radian)
cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)
newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
distance=srepr.distance, copy=False)
return cirs_frame.realize_frame(newrep)
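# Illustrative sketch, not part of the transform machinery: once registered,
# the transform above is used through the frame API, and the parallax
# handling makes the ICRS->CIRS->ICRS round trip self-consistent.
def _example_icrs_cirs_roundtrip():  # pragma: no cover - demonstration only
    from astropy.time import Time
    icrs = ICRS(ra=10.*u.deg, dec=20.*u.deg)
    cirs = icrs.transform_to(CIRS(obstime=Time('J2010')))
    back = cirs.transform_to(ICRS)
    assert np.isclose(back.ra.deg, 10.) and np.isclose(back.dec.deg, 20.)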
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)
def cirs_to_icrs(cirs_coo, icrs_frame):
srepr = cirs_coo.represent_as(SphericalRepresentation)
cirs_ra = srepr.lon.to_value(u.radian)
cirs_dec = srepr.lat.to_value(u.radian)
# set up the astrometry context for ICRS<->cirs and then convert to
# astrometric coordinate direction
jd1, jd2 = get_jd12(cirs_coo.obstime, 'tdb')
x, y, s = get_cip(jd1, jd2)
earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_coo.obstime)
astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s)
i_ra, i_dec = aticq(cirs_ra, cirs_dec, astrom)
if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
        # last step - ensures round-tripping with the icrs_to_cirs transform.
        # The distance in intermedrep is *not* a real distance as it does not
        # include the offset back to the SSB.
intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False)
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, CIRS)
def cirs_to_cirs(from_coo, to_frame):
if np.all(from_coo.obstime == to_frame.obstime):
return to_frame.realize_frame(from_coo.data)
else:
        # the CIRS<->CIRS transform actually goes through ICRS. This has the
        # subtle implication that a point in CIRS is uniquely determined
        # by the corresponding astrometric ICRS coordinate *at its
        # current time*. This also has consequences in terms of GR, but
        # those are sort of glossed over in the current scheme because we
        # are dropping distances anyway.
return from_coo.transform_to(ICRS).transform_to(to_frame)
# Now the GCRS-related transforms to/from ICRS
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)
def icrs_to_gcrs(icrs_coo, gcrs_frame):
# first set up the astrometry context for ICRS<->GCRS. There are a few steps...
# get the position and velocity arrays for the observatory. Need to
# have xyz in last dimension, and pos/vel in one-but-last.
# (Note could use np.stack once our minimum numpy version is >=1.10.)
obs_pv = erfa.pav2pv(
gcrs_frame.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m),
gcrs_frame.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s))
# find the position and velocity of earth
jd1, jd2 = get_jd12(gcrs_frame.obstime, 'tdb')
earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_frame.obstime)
# get astrometry context object, astrom.
astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric)
if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just do the infinite-distance/no parallax calculation
usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)
i_ra = usrepr.lon.to_value(u.radian)
i_dec = usrepr.lat.to_value(u.radian)
gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom)
newrep = UnitSphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, we first offset for parallax to get the
# BCRS coordinate direction and *then* run the ERFA transform for no
# parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
i_ra = srepr.lon.to_value(u.radian)
i_dec = srepr.lat.to_value(u.radian)
gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom)
newrep = SphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
distance=srepr.distance, copy=False)
return gcrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
GCRS, ICRS)
def gcrs_to_icrs(gcrs_coo, icrs_frame):
srepr = gcrs_coo.represent_as(SphericalRepresentation)
gcrs_ra = srepr.lon.to_value(u.radian)
gcrs_dec = srepr.lat.to_value(u.radian)
# set up the astrometry context for ICRS<->GCRS and then convert to BCRS
# coordinate direction
obs_pv = erfa.pav2pv(
gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m),
gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s))
jd1, jd2 = get_jd12(gcrs_coo.obstime, 'tdb')
earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime)
astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric)
i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)
if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_gcrs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False)
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GCRS)
def gcrs_to_gcrs(from_coo, to_frame):
if (np.all(from_coo.obstime == to_frame.obstime)
and np.all(from_coo.obsgeoloc == to_frame.obsgeoloc)):
return to_frame.realize_frame(from_coo.data)
else:
# like CIRS, we do this self-transform via ICRS
return from_coo.transform_to(ICRS).transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)
def gcrs_to_hcrs(gcrs_coo, hcrs_frame):
if np.any(gcrs_coo.obstime != hcrs_frame.obstime):
        # if the GCRS obstime and HCRS obstime are not the same, we first
        # have to move to a GCRS where they are.
frameattrs = gcrs_coo.get_frame_attr_names()
frameattrs['obstime'] = hcrs_frame.obstime
gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))
srepr = gcrs_coo.represent_as(SphericalRepresentation)
gcrs_ra = srepr.lon.to_value(u.radian)
gcrs_dec = srepr.lat.to_value(u.radian)
# set up the astrometry context for ICRS<->GCRS and then convert to ICRS
# coordinate direction
obs_pv = erfa.pav2pv(
gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m),
gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s))
jd1, jd2 = get_jd12(hcrs_frame.obstime, 'tdb')
earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime)
astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric)
i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)
# convert to Quantity objects
i_ra = u.Quantity(i_ra, u.radian, copy=False)
i_dec = u.Quantity(i_dec, u.radian, copy=False)
if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)
else:
# When there is a distance, apply the parallax/offset to the
# Heliocentre as the last step to ensure round-tripping with the
# hcrs_to_gcrs transform
# Note that the distance in intermedrep is *not* a real distance as it
# does not include the offset back to the Heliocentre
intermedrep = SphericalRepresentation(lat=i_dec, lon=i_ra,
distance=srepr.distance,
copy=False)
        # astrom['eh'] and astrom['em'] contain the Sun-to-observer unit
        # vector and distance, respectively. Shapes are (X, 3) and (X), where
        # (X) is the shape resulting from broadcasting the shape of the times
        # object against the shape of the pv array.
        # E.g., for a (5,)-shaped times object and a scalar observer, eh has
        # shape (5, 3) and em has shape (5,).
        # broadcast em to eh and scale eh
eh = astrom['eh'] * astrom['em'][..., np.newaxis]
eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)
newrep = intermedrep.to_cartesian() + eh
return hcrs_frame.realize_frame(newrep)
_NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This "
"probably means you created coordinates with lat/lon but "
"no distance. Heliocentric<->ICRS transforms cannot "
"function in this case because there is an origin shift.")
@frame_transform_graph.transform(AffineTransform, HCRS, ICRS)
def hcrs_to_icrs(hcrs_coo, icrs_frame):
    # this is just an origin translation, so without a distance it cannot go ahead
if isinstance(hcrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__))
if hcrs_coo.data.differentials:
from ..solar_system import get_body_barycentric_posvel
bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun',
hcrs_coo.obstime)
bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel)
else:
from ..solar_system import get_body_barycentric
bary_sun_pos = get_body_barycentric('sun', hcrs_coo.obstime)
bary_sun_vel = None
return None, bary_sun_pos
@frame_transform_graph.transform(AffineTransform, ICRS, HCRS)
def icrs_to_hcrs(icrs_coo, hcrs_frame):
    # this is just an origin translation, so without a distance it cannot go ahead
if isinstance(icrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__))
if icrs_coo.data.differentials:
from ..solar_system import get_body_barycentric_posvel
bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun',
hcrs_frame.obstime)
bary_sun_pos = -bary_sun_pos.with_differentials(-bary_sun_vel)
else:
from ..solar_system import get_body_barycentric
bary_sun_pos = -get_body_barycentric('sun', hcrs_frame.obstime)
bary_sun_vel = None
return None, bary_sun_pos
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HCRS, HCRS)
def hcrs_to_hcrs(from_coo, to_frame):
if np.all(from_coo.obstime == to_frame.obstime):
return to_frame.realize_frame(from_coo.data)
else:
# like CIRS, we do this self-transform via ICRS
return from_coo.transform_to(ICRS).transform_to(to_frame)
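# Illustrative sketch (not part of the transform machinery, which applies
# affine transforms internally): the AffineTransform functions above return a
# ``(matrix, offset)`` pair, where a matrix of ``None`` means "translation
# only". Roughly, for a CartesianRepresentation ``rep`` (hypothetical name):
#
#     matrix, offset = hcrs_to_icrs(hcrs_coo, icrs_frame)
#     newrep = rep if matrix is None else rep.transform(matrix)
#     newrep = newrep + offset  # offset is the barycentric position of the Sun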
|
1d7dd67d0762e3be60b9ec1d646995a715ef9ebb758bb3666990720175412845 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from CIRS.
Currently that just means AltAz.
"""
import numpy as np
from ... import units as u
from ..baseframe import frame_transform_graph
from ..transformations import FunctionTransformWithFiniteDifference
from ..representation import (SphericalRepresentation,
UnitSphericalRepresentation)
from ... import _erfa as erfa
from .cirs import CIRS
from .altaz import AltAz
from .utils import get_polar_motion, get_dut1utc, get_jd12, PIOVER2
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, AltAz)
def cirs_to_altaz(cirs_coo, altaz_frame):
if np.any(cirs_coo.obstime != altaz_frame.obstime):
# the only frame attribute for the current CIRS is the obstime, but this
# would need to be updated if a future change allowed specifying an
# Earth location algorithm or something
cirs_coo = cirs_coo.transform_to(CIRS(obstime=altaz_frame.obstime))
# we use the same obstime everywhere now that we know they're the same
obstime = cirs_coo.obstime
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (isinstance(cirs_coo.data, UnitSphericalRepresentation) or
cirs_coo.cartesian.x.unit == u.one)
if is_unitspherical:
usrepr = cirs_coo.represent_as(UnitSphericalRepresentation)
cirs_ra = usrepr.lon.to_value(u.radian)
cirs_dec = usrepr.lat.to_value(u.radian)
else:
        # compute an "astrometric" ra/dec, i.e., the direction of the
# displacement vector from the observer to the target in CIRS
loccirs = altaz_frame.location.get_itrs(cirs_coo.obstime).transform_to(cirs_coo)
diffrepr = (cirs_coo.cartesian - loccirs.cartesian).represent_as(UnitSphericalRepresentation)
cirs_ra = diffrepr.lon.to_value(u.radian)
cirs_dec = diffrepr.lat.to_value(u.radian)
lon, lat, height = altaz_frame.location.to_geodetic('WGS84')
xp, yp = get_polar_motion(obstime)
# first set up the astrometry context for CIRS<->AltAz
jd1, jd2 = get_jd12(obstime, 'utc')
astrom = erfa.apio13(jd1, jd2,
get_dut1utc(obstime),
lon.to_value(u.radian), lat.to_value(u.radian),
height.to_value(u.m),
xp, yp, # polar motion
                         # all below are already in correct units because they are QuantityAttributes
altaz_frame.pressure.value,
altaz_frame.temperature.value,
altaz_frame.relative_humidity.value,
altaz_frame.obswl.value)
az, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
if is_unitspherical:
rep = UnitSphericalRepresentation(lat=u.Quantity(PIOVER2 - zen, u.radian, copy=False),
lon=u.Quantity(az, u.radian, copy=False),
copy=False)
else:
# now we get the distance as the cartesian distance from the earth
# location to the coordinate location
locitrs = altaz_frame.location.get_itrs(obstime)
distance = locitrs.separation_3d(cirs_coo)
rep = SphericalRepresentation(lat=u.Quantity(PIOVER2 - zen, u.radian, copy=False),
lon=u.Quantity(az, u.radian, copy=False),
distance=distance,
copy=False)
return altaz_frame.realize_frame(rep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, CIRS)
def altaz_to_cirs(altaz_coo, cirs_frame):
usrepr = altaz_coo.represent_as(UnitSphericalRepresentation)
az = usrepr.lon.to_value(u.radian)
zen = PIOVER2 - usrepr.lat.to_value(u.radian)
lon, lat, height = altaz_coo.location.to_geodetic('WGS84')
xp, yp = get_polar_motion(altaz_coo.obstime)
    # first set up the astrometry context for CIRS<->AltAz at the altaz_coo time
jd1, jd2 = get_jd12(altaz_coo.obstime, 'utc')
astrom = erfa.apio13(jd1, jd2,
get_dut1utc(altaz_coo.obstime),
lon.to_value(u.radian), lat.to_value(u.radian),
height.to_value(u.m),
xp, yp, # polar motion
                         # all below are already in correct units because they are QuantityAttributes
altaz_coo.pressure.value,
altaz_coo.temperature.value,
altaz_coo.relative_humidity.value,
altaz_coo.obswl.value)
# the 'A' indicates zen/az inputs
cirs_ra, cirs_dec = erfa.atoiq('A', az, zen, astrom)*u.radian
if isinstance(altaz_coo.data, UnitSphericalRepresentation) or altaz_coo.cartesian.x.unit == u.one:
cirs_at_aa_time = CIRS(ra=cirs_ra, dec=cirs_dec, distance=None,
obstime=altaz_coo.obstime)
else:
# treat the output of atoiq as an "astrometric" RA/DEC, so to get the
        # actual RA/Dec from the observer's vantage point, we have to reverse
# the vector operation of cirs_to_altaz (see there for more detail)
loccirs = altaz_coo.location.get_itrs(altaz_coo.obstime).transform_to(cirs_frame)
astrometric_rep = SphericalRepresentation(lon=cirs_ra, lat=cirs_dec,
distance=altaz_coo.distance)
newrepr = astrometric_rep + loccirs.cartesian
cirs_at_aa_time = CIRS(newrepr, obstime=altaz_coo.obstime)
# this final transform may be a no-op if the obstimes are the same
return cirs_at_aa_time.transform_to(cirs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, AltAz)
def altaz_to_altaz(from_coo, to_frame):
# for now we just implement this through CIRS to make sure we get everything
# covered
return from_coo.transform_to(CIRS(obstime=from_coo.obstime)).transform_to(to_frame)
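# Round-trip sketch (illustrative only; ``cirs_coo`` and ``loc`` are
# hypothetical stand-ins for a CIRS coordinate and an EarthLocation):
#
#     aa = cirs_coo.transform_to(AltAz(obstime=cirs_coo.obstime, location=loc))
#     back = aa.transform_to(CIRS(obstime=cirs_coo.obstime))
#
# With pressure=0 (the default, i.e. no refraction), ``back`` should match
# ``cirs_coo`` to numerical precision; with refraction on, agreement degrades
# at low altitudes, as noted in the AltAz documentation.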
|
19403cc64932a45a1611785e7fbd99d60fec6b1ea34b31c38c32a36cdaf41494 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from ... import units as u
from ...utils.decorators import format_doc
from .. import representation as r
from ..baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from ..attributes import (Attribute, TimeAttribute,
QuantityAttribute, EarthLocationAttribute)
__all__ = ['AltAz']
_90DEG = 90*u.deg
doc_components = """
az : `Angle`, optional, must be keyword
The Azimuth for this object (``alt`` must also be given and
``representation`` must be None).
alt : `Angle`, optional, must be keyword
The Altitude for this object (``az`` must also be given and
``representation`` must be None).
distance : :class:`~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_az_cosalt : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in azimuth (including the ``cos(alt)`` factor) for
this object (``pm_alt`` must also be given).
pm_alt : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in altitude for this object (``pm_az_cosalt`` must
also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity`
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity`
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
    relative_humidity : `~astropy.units.Quantity` or number
        The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity`
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
    transforming to AltAz and back to another frame can give highly discrepant
    results. For much better numerical stability, leave the ``pressure`` at
    ``0`` (the default), which disables the refraction correction and yields
    "topocentric" horizontal coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class AltAz(BaseCoordinateFrame):
"""
A coordinate or frame in the Altitude-Azimuth system (Horizontal
coordinates). Azimuth is oriented East of North (i.e., N=0, E=90 degrees).
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from AltAz to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'az'),
RepresentationMapping('lat', 'alt')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def secz(self):
"""
        Secant of the zenith angle for this coordinate, a common estimate of the
airmass.
"""
return 1/np.sin(self.alt)
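    # Worked example (illustrative): for alt = 30 deg the zenith angle is
    # 60 deg and secz = 1/sin(30 deg) = 2.0, the plane-parallel airmass.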
@property
def zen(self):
"""
The zenith angle for this coordinate
"""
return _90DEG.to(self.alt.unit) - self.alt
# self-transform defined in cirs_observed_transforms.py
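# A minimal usage sketch (illustrative only; ``EarthLocation`` and ``Time``
# are the usual astropy classes):
#
#     from astropy.coordinates import AltAz, EarthLocation
#     from astropy.time import Time
#     import astropy.units as u
#
#     loc = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km)
#     frame = AltAz(obstime=Time('J2000'), location=loc)
#     # pressure defaults to 0 hPa, so no refraction correction is applied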
|
cf08b64008c1c08315c4897b7b8ffa7443a8517921c8f97f82864aa7328ec591 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from ... import units as u
from ...tests.helper import (catch_warnings, pytest,
assert_quantity_allclose as assert_allclose)
from ...utils import OrderedDescriptorContainer
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyWarning
from .. import representation as r
from ..representation import REPRESENTATION_CLASSES
from ...units import allclose
from .test_representation import unitphysics # this fixture is used below
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
""" Unit tests of the Attribute descriptor """
from ..attributes import Attribute
class TestAttributes(metaclass=OrderedDescriptorContainer):
attr_none = Attribute()
attr_2 = Attribute(default=2)
attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2')
attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2')
attr_none_nonexist = Attribute(default=None, secondary_attribute='nonexist')
t = TestAttributes()
# Defaults
assert t.attr_none is None
assert t.attr_2 == 2
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
assert t.attr_none_nonexist is None # No default and non-existent secondary attr
    # Setting values via '_'-prefixed internal vars (as would normally be done in __init__)
t._attr_none = 10
assert t.attr_none == 10
t._attr_2 = 20
assert t.attr_2 == 20
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
t._attr_none_attr2 = 40
assert t.attr_none_attr2 == 40
# Make sure setting values via public attribute fails
with pytest.raises(AttributeError) as err:
t.attr_none = 5
assert 'Cannot set frame attribute' in str(err)
def test_frame_subclass_attribute_descriptor():
from ..builtin_frames import FK4
from ..attributes import Attribute, TimeAttribute
from astropy.time import Time
_EQUINOX_B1980 = Time('B1980', scale='tai')
class MyFK4(FK4):
# equinox inherited from FK4, obstime overridden, and newattr is new
obstime = TimeAttribute(default=_EQUINOX_B1980)
newattr = Attribute(default='newattr')
mfk4 = MyFK4()
assert mfk4.equinox.value == 'B1950.000'
assert mfk4.obstime.value == 'B1980.000'
assert mfk4.newattr == 'newattr'
assert set(mfk4.get_frame_attr_names()) == set(['equinox', 'obstime', 'newattr'])
mfk4 = MyFK4(equinox='J1980.0', obstime='J1990.0', newattr='world')
assert mfk4.equinox.value == 'J1980.000'
assert mfk4.obstime.value == 'J1990.000'
assert mfk4.newattr == 'world'
def test_create_data_frames():
from ..builtin_frames import ICRS
# from repr
i1 = ICRS(r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1*u.deg, lat=2*u.deg))
# from preferred name
i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
i4 = ICRS(ra=1*u.deg, dec=2*u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.]*u.deg
def test_create_orderered_data():
from ..builtin_frames import ICRS, Galactic, AltAz
TOL = 1e-10*u.deg
i = ICRS(1*u.deg, 2*u.deg)
assert (i.ra - 1*u.deg) < TOL
assert (i.dec - 2*u.deg) < TOL
g = Galactic(1*u.deg, 2*u.deg)
assert (g.l - 1*u.deg) < TOL
assert (g.b - 2*u.deg) < TOL
a = AltAz(1*u.deg, 2*u.deg)
assert (a.az - 1*u.deg) < TOL
assert (a.alt - 2*u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1*u.deg, 2*u.deg, 1*u.deg, 2*u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
ICRS(sph, 1*u.deg, 2*u.deg)
def test_create_nodata_frames():
from ..builtin_frames import ICRS, FK4, FK5
i = ICRS()
assert len(i.get_frame_attr_names()) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_names()['equinox']
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_names()['equinox']
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (FK4.get_frame_attr_names()['obstime'],
FK4.get_frame_attr_names()['equinox'])
def test_no_data_nonscalar_frames():
from ..builtin_frames import AltAz
from astropy.time import Time
a1 = AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
temperature=np.ones((3, 1)) * u.deg_C)
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError) as exc:
AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
temperature=np.ones((3,)) * u.deg_C)
assert 'inconsistent shapes' in str(exc)
def test_frame_repr():
from ..builtin_frames import ICRS, FK5
i = ICRS()
assert repr(i) == '<ICRS Frame>'
f5 = FK5()
assert repr(f5).startswith('<FK5 Frame (equinox=')
i2 = ICRS(ra=1*u.deg, dec=2*u.deg)
i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' ({})>').format(' 1., 2.' if NUMPY_LT_1_14
else '1., 2.')
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14
else '1., 2., 3.')
# try with arrays
i2 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[2.1, 3.1]*u.deg)
i3 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[-15.6, 17.1]*u.deg, distance=[11., 21.]*u.kpc)
assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' [{}]>').format('( 1.1, 2.1), ( 2.1, 3.1)'
if NUMPY_LT_1_14 else
'(1.1, 2.1), (2.1, 3.1)')
if NUMPY_LT_1_14:
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' [( 1.1, -15.6, 11.), ( 2.1, 17.1, 21.)]>')
else:
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>')
def test_frame_repr_vels():
from ..builtin_frames import ICRS
i = ICRS(ra=1*u.deg, dec=2*u.deg,
pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=2*u.marcsec/u.yr)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert repr(i) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' ({0})\n'
' (pm_ra_cosdec, pm_dec) in mas / yr\n'
' ({0})>').format(' 1., 2.' if NUMPY_LT_1_14 else
'1., 2.')
def test_converting_units():
import re
from ..baseframe import RepresentationMapping
from ..builtin_frames import ICRS, FK5
    # this is a regular expression that with split (see below) removes what's
    # after the decimal point to fix rounding problems
rexrepr = re.compile(r'(.*?=\d\.).*?( .*?=\d\.).*?( .*)')
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.*u.deg, dec=2.*u.deg)
i2_many = ICRS(ra=[2., 4.]*u.deg, dec=[2., -8.1]*u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5).transform_to(ICRS)
i4_many = i2_many.transform_to(FK5).transform_to(ICRS)
ri2 = ''.join(rexrepr.split(repr(i2)))
ri4 = ''.join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = ''.join(rexrepr.split(repr(i2_many)))
ri4_many = ''.join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'ra', u.hourangle),
RepresentationMapping('lat', 'dec', None),
RepresentationMapping('distance', 'distance')] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = ''.join(rexrepr.split(repr(i2)))
rfi = ''.join(rexrepr.split(repr(fi)))
rfi = re.sub('FakeICRS', 'ICRS', rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
from ..baseframe import RepresentationMapping
from ..builtin_frames import ICRS
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'rara', u.hourangle),
RepresentationMapping('lat', 'decdec', u.degree),
RepresentationMapping('distance', 'distance', u.kpc)]
}
i1 = NewICRS1(rara=10*u.degree, decdec=-12*u.deg, distance=1000*u.pc,
pm_rara_cosdecdec=100*u.mas/u.yr,
pm_decdec=17*u.mas/u.yr,
radial_velocity=10*u.km/u.s)
assert allclose(i1.rara, 10*u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12*u.deg)
assert allclose(i1.distance, 1000*u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr)
assert allclose(i1.pm_decdec, 17*u.mas/u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(r.UnitSphericalRepresentation,
s=r.UnitSphericalCosLatDifferential)
assert allclose(i1.rara, 10*u.deg)
assert allclose(i1.decdec, -12*u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr)
assert allclose(i1.pm_decdec, 17*u.mas/u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'ang1', u.hourangle),
RepresentationMapping('lat', 'ang2', u.degree),
RepresentationMapping('distance', 'howfar', u.kpc)]
}
i2 = NewICRS2(ang1=10*u.degree, ang2=-12*u.deg, howfar=1000*u.pc)
assert allclose(i2.ang1, 10*u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12*u.deg)
assert allclose(i2.howfar, 1000*u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping('d_lon_coslat', 'pm_ang1', u.hourangle/u.year),
RepresentationMapping('d_lat', 'pm_ang2'),
RepresentationMapping('d_distance', 'vlos', u.kpc/u.Myr)]
}
i3 = NewICRS3(lon=10*u.degree, lat=-12*u.deg, distance=1000*u.pc,
pm_ang1=1*u.mas/u.yr, pm_ang2=2*u.mas/u.yr,
vlos=100*u.km/u.s)
assert allclose(i3.pm_ang1, 1*u.mas/u.yr)
assert i3.pm_ang1.unit == u.hourangle/u.year
assert allclose(i3.pm_ang2, 2*u.mas/u.yr)
assert allclose(i3.vlos, 100*u.km/u.s)
assert i3.vlos.unit == u.kpc/u.Myr
def test_realizing():
from ..builtin_frames import ICRS, FK5
from ...time import Time
rep = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time('J2001', scale='utc'))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_names()['equinox']
# Check that a nicer error message is returned:
with pytest.raises(TypeError) as excinfo:
f.realize_frame(f.representation)
assert ('Class passed as data instead of a representation' in
excinfo.value.args[0])
def test_replicating():
from ..builtin_frames import ICRS, AltAz
from ...time import Time
i = ICRS(ra=[1]*u.deg, dec=[2]*u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
i.data._lat[:] = 0*u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not iclone.has_data
aa = AltAz(alt=1*u.deg, az=2*u.deg, obstime=Time('J2000'))
aaclone = aa.replicate_without_data(obstime=Time('J2001'))
assert not aaclone.has_data
assert aa.obstime != aaclone.obstime
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
from ..builtin_frames import ICRS
rep = r.SphericalRepresentation(
[1, 2, 3]*u.deg, [4, 5, 6]*u.deg, [7, 8, 9]*u.kpc)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
This test just makes sure the transform architecture works, but does *not*
    actually test that all the builtin transforms themselves are accurate
"""
from ..builtin_frames import ICRS, FK4, FK5, Galactic
from ...time import Time
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
f = i.transform_to(FK5)
i2 = f.transform_to(ICRS)
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc)
f = i.transform_to(FK5)
i2 = f.transform_to(ICRS)
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001', scale='utc'))
f4 = f.transform_to(FK4)
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4.get_frame_attr_names()['equinox']
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
i2 = i.transform_to(ICRS)
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001', scale='utc'))
f2 = f.transform_to(FK5) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
i2 = i1.transform_to(Galactic).transform_to(ICRS)
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
from ..builtin_frames import ICRS, FK5
from ...time import Time
times = Time('2016-08-23') + np.linspace(0, 10, 12)*u.day
coo1 = ICRS(ra=[[0.], [10.], [20.]]*u.deg,
dec=[[-30.], [30.], [60.]]*u.deg)
coo2 = coo1.transform_to(FK5(equinox=times))
assert coo2.shape == (3, 12)
def test_sep():
from ..builtin_frames import ICRS
i1 = ICRS(ra=0*u.deg, dec=1*u.deg)
i2 = ICRS(ra=0*u.deg, dec=2*u.deg)
sep = i1.separation(i2)
assert sep.deg == 1
i3 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc)
i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc)
sep3d = i3.separation_3d(i4)
assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc)
# check that it works even with velocities
i5 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr,
radial_velocity=[5, 6]*u.km/u.s)
i6 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[7, 8]*u.kpc,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr,
radial_velocity=[5, 6]*u.km/u.s)
sep3d = i5.separation_3d(i6)
assert_allclose(sep3d.to(u.kpc), np.array([2, 2])*u.kpc)
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
from ...time import Time
from ..builtin_frames import FK4
c = FK4(1 * u.deg, 2 * u.deg, equinox='J2001.5', obstime='2000-01-01 12:00:00')
assert c.equinox == Time('J2001.5')
assert c.obstime == Time('2000-01-01 12:00:00')
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert 'Invalid time input' in str(err)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime='hello')
assert 'Invalid time input' in str(err)
# A vector time should work if the shapes match, but we don't automatically
# broadcast the basic data (just like time).
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=['J2000', 'J2001'])
with pytest.raises(ValueError) as err:
FK4(1 * u.deg, 2 * u.deg, obstime=['J2000', 'J2001'])
assert 'shape' in str(err)
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
from ...time import Time
from ..builtin_frames import FK5
c1 = FK5(ra=1*u.deg, dec=1*u.deg)
c2 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=FK5.get_frame_attr_names()['equinox'])
c3 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=Time('J2001.5'))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default('equinox')
assert not c2.is_frame_attr_default('equinox')
assert not c3.is_frame_attr_default('equinox')
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg))
assert c4.is_frame_attr_default('equinox')
assert not c5.is_frame_attr_default('equinox')
def test_altaz_attributes():
from ...time import Time
from .. import EarthLocation, AltAz
aa = AltAz(1*u.deg, 2*u.deg)
assert aa.obstime is None
assert aa.location is None
aa2 = AltAz(1*u.deg, 2*u.deg, obstime='J2000')
assert aa2.obstime == Time('J2000')
aa3 = AltAz(1*u.deg, 2*u.deg, location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m))
assert isinstance(aa3.location, EarthLocation)
def test_representation():
"""
Test the getter and setter properties for `representation`
"""
from ..builtin_frames import ICRS
# Create the frame object.
icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
data = icrs.data
# Create some representation objects.
icrs_cart = icrs.cartesian
icrs_spher = icrs.spherical
# Testing when `_representation` set to `CartesianRepresentation`.
icrs.representation = r.CartesianRepresentation
assert icrs.representation == r.CartesianRepresentation
assert icrs_cart.x == icrs.x
assert icrs_cart.y == icrs.y
assert icrs_cart.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
for attr in ('ra', 'dec', 'distance'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err)
# Testing when `_representation` set to `CylindricalRepresentation`.
icrs.representation = r.CylindricalRepresentation
assert icrs.representation == r.CylindricalRepresentation
assert icrs.data == data
# Testing setter input using text argument for spherical.
icrs.representation = 'spherical'
assert icrs.representation is r.SphericalRepresentation
assert icrs_spher.lat == icrs.dec
assert icrs_spher.lon == icrs.ra
assert icrs_spher.distance == icrs.distance
assert icrs.data == data
# Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
for attr in ('x', 'y', 'z'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err)
# Testing setter input using text argument for cylindrical.
icrs.representation = 'cylindrical'
assert icrs.representation is r.CylindricalRepresentation
assert icrs.data == data
with pytest.raises(ValueError) as err:
icrs.representation = 'WRONG'
assert 'but must be a BaseRepresentation class' in str(err)
with pytest.raises(ValueError) as err:
icrs.representation = ICRS
assert 'but must be a BaseRepresentation class' in str(err)
def test_represent_as():
from ..builtin_frames import ICRS
icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
cart1 = icrs.represent_as('cartesian')
cart2 = icrs.represent_as(r.CartesianRepresentation)
    assert cart1.x == cart2.x
    assert cart1.y == cart2.y
    assert cart1.z == cart2.z
# now try with velocities
icrs = ICRS(ra=0*u.deg, dec=0*u.deg, distance=10*u.kpc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=1*u.km/u.s)
# single string
rep2 = icrs.represent_as('cylindrical')
assert isinstance(rep2, r.CylindricalRepresentation)
assert isinstance(rep2.differentials['s'], r.CylindricalDifferential)
    # single class with positional in_frame_units; verify that a warning is raised
with catch_warnings() as w:
icrs.represent_as(r.CylindricalRepresentation, False)
assert len(w) == 1
assert w[0].category == AstropyWarning
assert 'argument position' in str(w[0].message)
# TODO: this should probably fail in the future once we figure out a better
# workaround for dealing with UnitSphericalRepresentation's with
# RadialDifferential's
# two classes
# rep2 = icrs.represent_as(r.CartesianRepresentation,
# r.SphericalCosLatDifferential)
# assert isinstance(rep2, r.CartesianRepresentation)
# assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
with pytest.raises(ValueError):
icrs.represent_as('odaigahara')
def test_shorthand_representations():
from ..builtin_frames import ICRS
rep = r.CartesianRepresentation([1, 2, 3]*u.pc)
dif = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
rep = rep.with_differentials(dif)
icrs = ICRS(rep)
sph = icrs.spherical
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials['s'], r.SphericalDifferential)
sph = icrs.sphericalcoslat
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials['s'], r.SphericalCosLatDifferential)
def test_dynamic_attrs():
from ..builtin_frames import ICRS
c = ICRS(1*u.deg, 2*u.deg)
assert 'ra' in dir(c)
assert 'dec' in dir(c)
with pytest.raises(AttributeError) as err:
c.blahblah
assert "object has no attribute 'blahblah'" in str(err)
with pytest.raises(AttributeError) as err:
c.ra = 1
assert "Cannot set any frame attribute" in str(err)
c.blahblah = 1
assert c.blahblah == 1
def test_nodata_error():
from ..builtin_frames import ICRS
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.data
assert 'does not have associated data' in str(excinfo.value)
def test_len0_data():
from ..builtin_frames import ICRS
i = ICRS([]*u.deg, []*u.deg)
assert i.has_data
repr(i)
def test_quantity_attributes():
from ..builtin_frames import GCRS
# make sure we can create a GCRS frame with valid inputs
GCRS(obstime='J2002', obsgeoloc=[1, 2, 3]*u.km, obsgeovel=[4, 5, 6]*u.km/u.s)
    # make sure it fails for invalid locs or vels
with pytest.raises(TypeError):
GCRS(obsgeoloc=[1, 2, 3]) # no unit
with pytest.raises(u.UnitsError):
GCRS(obsgeoloc=[1, 2, 3]*u.km/u.s) # incorrect unit
with pytest.raises(ValueError):
GCRS(obsgeoloc=[1, 3]*u.km) # incorrect shape
def test_eloc_attributes():
from .. import AltAz, ITRS, GCRS, EarthLocation
el = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km)
it = ITRS(r.SphericalRepresentation(lon=12.3*u.deg, lat=45.6*u.deg, distance=1*u.km))
gc = GCRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=6375*u.km)
el1 = AltAz(location=el).location
assert isinstance(el1, EarthLocation)
# these should match *exactly* because the EarthLocation
assert el1.lat == el.lat
assert el1.lon == el.lon
assert el1.height == el.height
el2 = AltAz(location=it).location
assert isinstance(el2, EarthLocation)
# these should *not* match because giving something in Spherical ITRS is
# *not* the same as giving it as an EarthLocation: EarthLocation is on an
# elliptical geoid. So the longitude should match (because flattening is
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
assert not allclose(el2.lat, it.spherical.lat)
assert allclose(el2.lon, it.spherical.lon)
assert el2.height < -6000*u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
assert not allclose(el3.lat, gc.dec)
assert not allclose(el3.lon, gc.ra)
assert np.abs(el3.height) < 500*u.km
def test_equivalent_frames():
from .. import SkyCoord
from ..builtin_frames import ICRS, FK4, FK5, AltAz
i = ICRS()
i2 = ICRS(1*u.deg, 2*u.deg)
assert i.is_equivalent_frame(i)
assert i.is_equivalent_frame(i2)
with pytest.raises(TypeError):
assert i.is_equivalent_frame(10)
with pytest.raises(TypeError):
assert i2.is_equivalent_frame(SkyCoord(i2))
f1 = FK5()
f2 = FK5(1*u.deg, 2*u.deg, equinox='J2000')
f3 = FK5(equinox='J2010')
f4 = FK4(equinox='J2010')
assert f1.is_equivalent_frame(f1)
assert not i.is_equivalent_frame(f1)
assert f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f3.is_equivalent_frame(f4)
aa1 = AltAz()
aa2 = AltAz(obstime='J2010')
assert aa2.is_equivalent_frame(aa2)
assert not aa1.is_equivalent_frame(i)
assert not aa1.is_equivalent_frame(aa2)
def test_representation_subclass():
# Regression test for #3354
from ..builtin_frames import FK5
# Normally when instantiating a frame without a distance the frame will try
# and use UnitSphericalRepresentation internally instead of
# SphericalRepresentation.
frame = FK5(representation=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation == r.SphericalRepresentation
# If using a SphericalRepresentation class this used to not work, so we
# test here that this is now fixed.
class NewSphericalRepresentation(r.SphericalRepresentation):
attr_classes = r.SphericalRepresentation.attr_classes
frame = FK5(representation=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation == NewSphericalRepresentation
# A similar issue then happened in __repr__ with subclasses of
# SphericalRepresentation.
assert repr(frame) == ("<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n"
" ({})>").format(' 32., 20.' if NUMPY_LT_1_14
else '32., 20.')
# A more subtle issue is when specifying a custom
# UnitSphericalRepresentation subclass for the data and
# SphericalRepresentation or a subclass for the representation.
class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
attr_classes = r.UnitSphericalRepresentation.attr_classes
def __repr__(self):
return "<NewUnitSphericalRepresentation: spam spam spam>"
frame = FK5(NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
representation=NewSphericalRepresentation)
assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
from ..builtin_frames import ICRS
c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
c.representation = 'cartesian'
assert c[0].representation is r.CartesianRepresentation
def test_component_error_useful():
"""
Check that a data-less frame gives useful error messages about not having
data when the attributes asked for are possible coordinate components
"""
from ..builtin_frames import ICRS
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.ra
assert 'does not have associated data' in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo1:
i.foobar
with pytest.raises(AttributeError) as excinfo2:
i.lon # lon is *not* the component name despite being the underlying representation's name
assert "object has no attribute 'foobar'" in str(excinfo1.value)
assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
from ..builtin_frames import ICRS
i = ICRS(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_inplace_array():
from ..builtin_frames import ICRS
i = ICRS([[1, 2], [3, 4]]*u.deg, [[10, 20], [30, 40]]*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache['representation']) == 2
# Modify the data
i.data.lon[:, 0] = [100, 200]*u.deg
# Clear the cache
i.cache.clear()
    # This will use a second (potentially cached) rep
assert_allclose(i.ra, [[100, 2], [200, 4]]*u.deg)
assert_allclose(i.dec, [[10, 20], [30, 40]]*u.deg)
def test_inplace_change():
from ..builtin_frames import ICRS
i = ICRS(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache['representation']) == 2
# Modify the data
i.data.lon[()] = 10*u.deg
# Clear the cache
i.cache.clear()
    # This will use a second (potentially cached) rep
assert i.ra == 10 * u.deg
assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
from ..builtin_frames import ICRS
dif1 = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
dif2 = r.CartesianDifferential([1, 2, 3]*u.km/u.s**2)
rep = r.CartesianRepresentation([1, 2, 3]*u.pc,
differentials={'s': dif1, 's2': dif2})
    # check that an error is raised when more than one differential is passed
with pytest.raises(ValueError):
ICRS(rep)
def test_representation_arg_backwards_compatibility():
# TODO: this test can be removed when the `representation` argument is
# removed from the BaseCoordinateFrame initializer.
from ..builtin_frames import ICRS
c1 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation_type=r.CartesianRepresentation)
c2 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation=r.CartesianRepresentation)
c3 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation='cartesian')
assert c1.x == c2.x
assert c1.y == c2.y
assert c1.z == c2.z
assert c1.x == c3.x
assert c1.y == c3.y
assert c1.z == c3.z
assert c1.representation == c1.representation_type
with pytest.raises(ValueError):
ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation='cartesian',
representation_type='cartesian')
def test_missing_component_error_names():
"""
This test checks that the component names are frame component names, not
representation or differential names, when referenced in an exception raised
when not passing in enough data. For example:
ICRS(ra=10*u.deg)
should state:
TypeError: __init__() missing 1 required positional argument: 'dec'
"""
from ..builtin_frames import ICRS
with pytest.raises(TypeError) as e:
ICRS(ra=150 * u.deg)
assert "missing 1 required positional argument: 'dec'" in str(e)
with pytest.raises(TypeError) as e:
ICRS(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert "pm_ra_cosdec" in str(e)
def test_non_spherical_representation_unit_creation(unitphysics):
from ..builtin_frames import ICRS
class PhysicsICRS(ICRS):
default_representation = r.PhysicsSphericalRepresentation
pic = PhysicsICRS(phi=1*u.deg, theta=25*u.deg, r=1*u.kpc)
assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
picu = PhysicsICRS(phi=1*u.deg, theta=25*u.deg)
assert isinstance(picu.data, unitphysics)
|
7bf58e0ef6a69e62e530f3d58d151e14c5a2f1ed8a3a576a68150bb464058c80 |
import pytest
import numpy as np
from ...time import Time
from ... import units as u
from ...constants import c
from ..builtin_frames import GCRS
from ..earth import EarthLocation
from ..sky_coordinate import SkyCoord
from ..solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC,
_apparent_position_in_true_coordinates,
get_body_barycentric, get_body_barycentric_posvel)
from ..funcs import get_sun
from ...tests.helper import assert_quantity_allclose
from ...units import allclose as quantity_allclose
try:
import jplephem # pylint: disable=W0611
except ImportError:
HAS_JPLEPHEM = False
else:
HAS_JPLEPHEM = True
try:
from skyfield.api import load # pylint: disable=W0611
except ImportError:
HAS_SKYFIELD = False
else:
HAS_SKYFIELD = True
de432s_separation_tolerance_planets = 5*u.arcsec
de432s_separation_tolerance_moon = 5*u.arcsec
de432s_distance_tolerance = 20*u.km
skyfield_angular_separation_tolerance = 1*u.arcsec
skyfield_separation_tolerance = 10*u.km
@pytest.mark.remote_data
@pytest.mark.skipif(str('not HAS_SKYFIELD'))
def test_positions_skyfield():
"""
Test positions against those generated by skyfield.
"""
t = Time('1980-03-25 00:00')
location = None
# skyfield ephemeris
planets = load('de421.bsp')
ts = load.timescale()
mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon']
earth = planets['earth']
skyfield_t = ts.from_astropy(t)
if location is not None:
earth = earth.topos(latitude_degrees=location.lat.to_value(u.deg),
longitude_degrees=location.lon.to_value(u.deg),
elevation_m=location.height.to_value(u.m))
skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()
skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent()
skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(t)
frame = GCRS(obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
else:
frame = GCRS(obstime=t)
ra, dec, dist = skyfield_mercury.radec(epoch='date')
skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_jupiter.radec(epoch='date')
skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_moon.radec(epoch='date')
skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
moon_astropy = get_moon(t, location, ephemeris='de430')
mercury_astropy = get_body('mercury', t, location, ephemeris='de430')
jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430')
# convert to true equator and equinox
jupiter_astropy = _apparent_position_in_true_coordinates(jupiter_astropy)
mercury_astropy = _apparent_position_in_true_coordinates(mercury_astropy)
moon_astropy = _apparent_position_in_true_coordinates(moon_astropy)
assert (moon_astropy.separation(skyfield_moon) <
skyfield_angular_separation_tolerance)
assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance)
assert (jupiter_astropy.separation(skyfield_jupiter) <
skyfield_angular_separation_tolerance)
assert (jupiter_astropy.separation_3d(skyfield_jupiter) <
skyfield_separation_tolerance)
assert (mercury_astropy.separation(skyfield_mercury) <
skyfield_angular_separation_tolerance)
assert (mercury_astropy.separation_3d(skyfield_mercury) <
skyfield_separation_tolerance)
class TestPositionsGeocentric:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
self.t = Time('1980-03-25 00:00')
self.frame = GCRS(obstime=self.t)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s',
distance=c*6.323037*u.min, frame=self.frame),
'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s',
distance=c*0.021921*u.min, frame=self.frame),
'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s',
distance=c*37.694557*u.min, frame=self.frame),
'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s',
distance=c*8.294858*u.min, frame=self.frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 1000*u.km),
('jupiter', 78.*u.arcsec, 76000*u.km),
('moon', 20.*u.arcsec, 80*u.km),
('sun', 5.*u.arcsec, 11.*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
class TestPositionKittPeak:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg,
lat=31.963333333333342*u.deg,
height=2120*u.m)
self.t = Time('2014-09-25T00:00', location=kitt_peak)
obsgeoloc, obsgeovel = kitt_peak.get_gcrs_posvel(self.t)
self.frame = GCRS(obstime=self.t,
obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s',
distance=c*7.699020*u.min, frame=self.frame),
'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s',
distance=c*0.022054*u.min, frame=self.frame),
'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s',
distance=c*49.244937*u.min, frame=self.frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 500*u.km),
('jupiter', 78.*u.arcsec, 82000*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c.
"""
# Add uncertainty in position of Earth
dist_tol = dist_tol + 1300 * u.km
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('bodyname', ('mercury', 'jupiter'))
def test_custom_kernel_spec_body(self, bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
coord_by_name = get_body(bodyname, self.t, ephemeris='de432s')
kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]
coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s')
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s')
builtin_get_sun = get_sun(time)
sep = builtin_get_sun.separation(sun_jpl_gcrs)
assert sep < 0.1*u.arcsec
def test_get_moon_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_moon(times, ephemeris='builtin')
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00'))
ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert np.all(ep1.xyz == ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU)
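# At the March equinox the Earth sits near x ~ -1 AU and moves at ~30 km/s
# along -y in the ecliptic; projected into the equatorial frame that velocity
# is tilted by the obliquity (~23.5 deg), giving the components below.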
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * -30. * u.km / u.s
assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s)
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel('earth', t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(ep.get_xyz(xyz_axis=-1),
[[-1., 0., 0.], [+1., 0., 0.]]*u.AU,
atol=0.06*u.AU)
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected,
atol=2.*u.km/u.s)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'),
(('mercury', 1000.*u.km, 1.*u.km/u.s),
('jupiter', 100000.*u.km, 2.*u.km/u.s),
('earth', 10*u.km, 10*u.mm/u.s)))
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms)
t = Time('2016-03-20T12:30:00')
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
2c0de110584393908a3849582afb0e8ff4dd752d24b024e7bd96122ad4028ddf
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from ... import units as u
from .. import transformations as t
from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz
from .. import representation as r
from ..baseframe import frame_transform_graph
from ...tests.helper import (assert_quantity_allclose as assert_allclose,
catch_warnings)
from ...time import Time
from ...units import allclose as quantity_allclose
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
register_graph=frame_transform_graph)
c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0],
[0, coo.ra.degree, 0],
[0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.ra.degree, 1)
assert_allclose(c4.dec.degree, 2)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0],
[0, 1, 0],
[0, 0, 1]]
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.cartesian.x, 2*u.pc)
assert_allclose(c4.cartesian.y, 1*u.pc)
assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
# the graph is a down-going diamond with the lower-right edge slightly
# heavier, plus a cycle from the bottom back to the top and a pair of
# nodes isolated from node 1
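# sketch of the graph (edge priorities in parentheses):
#
#   1 --(1)--> 2 --(1)--> 4
#   1 --(1)--> 3 --(2)--> 4
#   4 --(5)--> 1            (the cycle back to the top)
#   5 --(1)--> 6            (isolated from 1)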
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float('inf')
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from ...utils import NumpyRNGContext
from .. import spherical_to_cartesian, cartesian_to_spherical
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
r, lat, lon = cartesian_to_spherical(x, y, z)
x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time('B1950', scale='utc')
j1975 = Time('J1975', scale='utc')
fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950)
fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS)
icrs_75 = fk4_75.transform_to(ICRS)
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
class transfunc:
rep = r.CartesianRepresentation(np.arange(3)*u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc)
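# rep/dif above are the fixed position/velocity offsets returned by the
# transform helpers below; rep0 is a zero position used when only a
# velocity offset is wanted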
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
@pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix,
transfunc.no_pos, transfunc.no_vel,
transfunc.just_matrix])
@pytest.mark.parametrize('rep', [
r.CartesianRepresentation(5, 6, 7, unit=u.pc),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr)),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr))
.represent_as(r.CylindricalRepresentation, r.CylindricalDifferential)
])
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
_rep = rep.to_cartesian()
diffs = dict([(k, diff.represent_as(r.CartesianDifferential, rep))
for k, diff in rep.differentials.items()])
expected_rep = _rep.with_differentials(diffs)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos = expected_pos + offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials['s']
if offset and offset.differentials:
expected_vel = (expected_vel + offset.differentials['s'])
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2)
assert quantity_allclose(c2.data.to_cartesian().xyz,
expected_pos.to_cartesian().xyz)
if expected_vel is not None:
diff = c2.data.differentials['s'].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
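# a 4x4 matrix is not a valid 3x3 coordinate-transform matrix, so this
# should raise on use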
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize('transfunc', [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
diff = r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr)
rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff)
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c2 = c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif1 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
dif2 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr**2)
rep = r.CartesianRepresentation(np.arange(3)*u.pc,
differentials={'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
# Check that if frame somehow gets through to transformation, multiple
# differentials are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c2 = c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
@pytest.mark.parametrize('rep', [
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr,
d_lat=11*u.mas/u.yr,
d_distance=-110*u.km/u.s)),
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}),
r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
distance=150*u.pc,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)})
])
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2)
assert 's' in rep.differentials
assert isinstance(c2.data.differentials['s'],
rep.differentials['s'].__class__)
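# a pure rotation cannot change a radial velocity, so the original
# differential object should simply be passed through unchanged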
if isinstance(rep.differentials['s'], r.RadialDifferential):
assert c2.data.differentials['s'] is rep.differentials['s']
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
from ..sites import get_builtin_sites
diff = r.CartesianDifferential([.1, .2, .3]*u.km/u.s)
rep = r.CartesianRepresentation([1, 2, 3]*u.au, differentials=diff)
loc = get_builtin_sites()['example_site']
aaf = AltAz(obstime='J2010', location=loc)
aaf2 = AltAz(obstime=aaf.obstime + 3*u.day, location=loc)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3)*u.day, location=loc)
aaf4 = AltAz(obstime=aaf.obstime, location=loc)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert 'cannot transform' in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert 'cannot transform' in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
ftrans = t.FunctionTransform(tfun, TCoo3, TCoo2,
register_graph=frame_transform_graph)
t3 = TCoo3(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr,
           pm_dec=1*u.marcsec/u.yr)
with catch_warnings() as w:
t2 = t3.transform_to(TCoo2)
assert len(w) == 1
assert 'they have been dropped' in str(w[0].message)
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
from ..baseframe import BaseCoordinateFrame
from ..attributes import Attribute
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert ('BorkedFrame' in exc.value.args[0] and
"'ra'" in exc.value.args[0] and
"'dec'" in exc.value.args[0])
def test_static_matrix_combine_paths():
"""
Check that combined staticmatrixtransform matrices provide the same
transformation as using an intermediate transformation.
This is somewhat of a regression test for #7706
"""
from ..baseframe import BaseCoordinateFrame
from ..matrix_utilities import rotation_matrix
class AFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t1 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'z'),
ICRS, AFrame)
t1.register(frame_transform_graph)
t2 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'z').T,
AFrame, ICRS)
t2.register(frame_transform_graph)
class BFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t3 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'x'),
ICRS, BFrame)
t3.register(frame_transform_graph)
t4 = t.StaticMatrixTransform(rotation_matrix(30.*u.deg, 'x').T,
BFrame, ICRS)
t4.register(frame_transform_graph)
c = Galactic(123*u.deg, 45*u.deg)
c1 = c.transform_to(BFrame) # direct
c2 = c.transform_to(AFrame).transform_to(BFrame) # thru A
c3 = c.transform_to(ICRS).transform_to(BFrame) # thru ICRS
assert quantity_allclose(c1.lon, c2.lon)
assert quantity_allclose(c1.lat, c2.lat)
assert quantity_allclose(c1.lon, c3.lon)
assert quantity_allclose(c1.lat, c3.lat)
for t_ in [t1, t2, t3, t4]:
t_.unregister(frame_transform_graph)
74564e32b9a30291c1b1e7e300c3e29feff211d0c0a8f543c6c7c4de43232550
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ... import units as u
from ..builtin_frames import ICRS, Galactic, Galactocentric
from .. import builtin_frames as bf
from ...units import allclose as quantity_allclose
from ..errors import ConvertError
from .. import representation as r
def test_api():
# transform observed Barycentric velocities to full-space Galactocentric
gc_frame = Galactocentric()
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=101*u.pc,
pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr,
radial_velocity=71*u.km/u.s)
icrs.transform_to(gc_frame)
# transform a set of ICRS proper motions to Galactic
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg,
pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr)
icrs.transform_to(Galactic)
# transform a Barycentric RV to a GSR RV
icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=1.*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=71*u.km/u.s)
icrs.transform_to(Galactocentric)
all_kwargs = [
dict(ra=37.4*u.deg, dec=-55.8*u.deg),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s),
# Now test other representation/differential types:
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type='cartesian'),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type=r.CartesianRepresentation),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type='cartesian'),
]
@pytest.mark.parametrize('kwargs', all_kwargs)
def test_all_arg_options(kwargs):
# Above is a list of all possible valid combinations of arguments.
# Here we do a simple thing and just verify that passing them in, we have
# access to the relevant attributes from the resulting object
icrs = ICRS(**kwargs)
gal = icrs.transform_to(Galactic)
repr_gal = repr(gal)
for k in kwargs:
if k == 'differential_type':
continue
getattr(icrs, k)
if 'pm_ra_cosdec' in kwargs: # should have both
assert 'pm_l_cosb' in repr_gal
assert 'pm_b' in repr_gal
assert 'mas / yr' in repr_gal
if 'radial_velocity' not in kwargs:
assert 'radial_velocity' not in repr_gal
if 'radial_velocity' in kwargs:
assert 'radial_velocity' in repr_gal
assert 'km / s' in repr_gal
if 'pm_ra_cosdec' not in kwargs:
assert 'pm_l_cosb' not in repr_gal
assert 'pm_b' not in repr_gal
@pytest.mark.parametrize('cls,lon,lat', [
[bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'],
[bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'],
[bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'],
[bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'],
[bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricTrueEcliptic, 'lon', 'lat'],
[bf.GeocentricTrueEcliptic, 'lon', 'lat'],
[bf.BarycentricTrueEcliptic, 'lon', 'lat'],
[bf.PrecessedGeocentric, 'ra', 'dec']
])
def test_expected_arg_names(cls, lon, lat):
kwargs = {lon: 37.4*u.deg, lat: -55.8*u.deg, 'distance': 150*u.pc,
'pm_{0}_cos{1}'.format(lon, lat): -21.2*u.mas/u.yr,
'pm_{0}'.format(lat): 17.1*u.mas/u.yr,
'radial_velocity': 105.7*u.km/u.s}
frame = cls(**kwargs)
# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
"""[1:-1]
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
"""[1:-1]
# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize('hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W',
[[float(val) for val in row.split()] for row in _xhip_data.split('\n')])
def test_xhip_galactic(hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W):
i = ICRS(ra*u.deg, dec*u.deg, dist*u.pc,
pm_ra_cosdec=pmra*u.marcsec/u.yr, pm_dec=pmdec*u.marcsec/u.yr,
radial_velocity=rv*u.km/u.s)
g = i.transform_to(Galactic)
# precision is limited by the 2-decimal-digit string representation of the pms
assert quantity_allclose(g.pm_l_cosb, pmglon*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr)
assert quantity_allclose(g.pm_b, pmglat*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr)
# make sure UVW also makes sense
uvwg = g.cartesian.differentials['s']
# precision is limited by 1-decimal digit string representation of vels
assert quantity_allclose(uvwg.d_x, U*u.km/u.s, atol=.1*u.km/u.s)
assert quantity_allclose(uvwg.d_y, V*u.km/u.s, atol=.1*u.km/u.s)
assert quantity_allclose(uvwg.d_z, W*u.km/u.s, atol=.1*u.km/u.s)
@pytest.mark.parametrize('kwargs,expect_success', [
[dict(ra=37.4*u.deg, dec=-55.8*u.deg), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s), True]
])
def test_frame_affinetransform(kwargs, expect_success):
"""There are already tests in test_transformations.py that check that
an AffineTransform fails without full-space data, but this just checks that
things work as expected at the frame level as well.
"""
icrs = ICRS(**kwargs)
if expect_success:
gc = icrs.transform_to(Galactocentric)
else:
with pytest.raises(ConvertError):
icrs.transform_to(Galactocentric)
def test_differential_type_arg():
"""
Test passing in an explicit differential class to the initializer or
changing the differential class via set_representation_cls
"""
from ..builtin_frames import ICRS
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
differential_type=r.UnitSphericalDifferential)
assert icrs.pm_ra == 10*u.mas/u.yr
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr,
differential_type={'s': r.UnitSphericalDifferential})
assert icrs.pm_ra == 10*u.mas/u.yr
icrs = ICRS(ra=1*u.deg, dec=60*u.deg,
pm_ra_cosdec=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr)
icrs.set_representation_cls(s=r.UnitSphericalDifferential)
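# pm_ra = pm_ra_cosdec / cos(dec) = 10 / cos(60 deg) = 20 mas/yr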
assert quantity_allclose(icrs.pm_ra, 20*u.mas/u.yr)
# incompatible representation and differential
with pytest.raises(TypeError):
ICRS(ra=1*u.deg, dec=60*u.deg,
v_x=1*u.km/u.s, v_y=-2*u.km/u.s, v_z=-2*u.km/u.s,
differential_type=r.CartesianDifferential)
# specify both
icrs = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
v_x=1*u.km/u.s, v_y=2*u.km/u.s, v_z=3*u.km/u.s,
            representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
assert icrs.x == 1*u.pc
assert icrs.y == 2*u.pc
assert icrs.z == 3*u.pc
assert icrs.v_x == 1*u.km/u.s
assert icrs.v_y == 2*u.km/u.s
assert icrs.v_z == 3*u.km/u.s
def test_slicing_preserves_differential():
icrs = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
icrs2 = icrs.reshape(1, 1)[:1, 0]
for name in icrs.representation_component_names.keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
for name in icrs.get_representation_component_names('s').keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(ra=np.random.uniform(0, 360, n)*u.deg,
dec=np.random.uniform(-90, 90, n)*u.deg,
distance=100*u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n)*u.mas/u.yr,
pm_dec=np.random.normal(0, 100, n)*u.mas/u.yr,
radial_velocity=np.random.normal(0, 100, n)*u.km/u.s)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
rv = 105.7*u.km/u.s
icrs3 = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=rv)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls('cartesian')
assert hasattr(icrs3, 'radial_velocity')
assert quantity_allclose(icrs3.radial_velocity, rv)
icrs4 = ICRS(x=30*u.pc, y=20*u.pc, z=11*u.pc,
v_x=10*u.km/u.s, v_y=10*u.km/u.s, v_z=10*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential)
icrs4.radial_velocity
f1873ccc08f827dd78ab115a4a26fb7280b63e8c447e07522b070976b6b1e043
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ... import units as u
from ..distances import Distance
from ..builtin_frames import (ICRS, FK5, FK4, FK4NoETerms, Galactic,
Supergalactic, Galactocentric, HCRS, GCRS, LSR)
from .. import SkyCoord
from ...tests.helper import assert_quantity_allclose as assert_allclose
from .. import EarthLocation, CartesianRepresentation
from ...time import Time
from ...units import allclose
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (10.0004738, 40.9952444), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
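# build all ordered pairs (i < j) of systems together with their coordinates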
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED.
"""
coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist)
coo2 = coo1.transform_to(tosys)
if tosys is FK4:
coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950', scale='utc')))
assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision
else:
assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision
assert coo1.distance.unit == u.kpc
assert coo2.distance.unit == u.kpc
assert m31_dist.unit == u.kpc
assert (coo2.distance - m31_dist) < dist_precision
# check round-tripping
coo1_2 = coo2.transform_to(fromsys)
assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision
assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision
assert (coo1_2.distance - m31_dist) < dist_precision
def test_precession():
"""
Ensures that FK4 and FK5 coordinates precess their equinoxes
"""
j2000 = Time('J2000', scale='utc')
b1950 = Time('B1950', scale='utc')
j1975 = Time('J1975', scale='utc')
b1975 = Time('B1975', scale='utc')
fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian)
assert fk4.equinox.byear == b1950.byear
fk4_2 = fk4.transform_to(FK4(equinox=b1975))
assert fk4_2.equinox.byear == b1975.byear
fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK4(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
"""
Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
"""
fk5 = FK5(ra=1*u.deg, dec=2*u.deg)
direct = fk5.transform_to(Galactic)
indirect = fk5.transform_to(FK4).transform_to(Galactic)
assert direct.separation(indirect).degree < 1.e-10
direct = fk5.transform_to(Galactic)
indirect = fk5.transform_to(FK4NoETerms).transform_to(Galactic)
assert direct.separation(indirect).degree < 1.e-10
def test_galactocentric():
# when z_sun=0, transformation should be very similar to Galactic
icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg,
dec=np.linspace(-90, 90, 10)*u.deg,
distance=1.*u.kpc)
g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz
diff = np.abs(g_xyz - gc_xyz)
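# the x offset reflects the frame's default galcen_distance (8.3 kpc here);
# with z_sun=0 the other axes should agree with Galactic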
assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc)
assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc)
# generate some test coordinates
g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg,
distance=[np.sqrt(2)]*4*u.kpc)
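# with the Sun 1 kpc from the center and targets sqrt(2) kpc away, the four
# points land one unit along -z, +z, +y and -y of the Galactocentric frame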
xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz
true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc
assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc)
# check that ND arrays work
# from Galactocentric to Galactic
x = np.linspace(-10., 10., 100) * u.kpc
y = np.linspace(-10., 10., 100) * u.kpc
z = np.zeros_like(x)
g1 = Galactocentric(x=x, y=y, z=z)
g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1),
z=z.reshape(100, 1, 1))
g1t = g1.transform_to(Galactic)
g2t = g2.transform_to(Galactic)
assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
# from Galactic to Galactocentric
l = np.linspace(15, 30., 100) * u.deg
b = np.linspace(-10., 10., 100) * u.deg
d = np.ones_like(l.value) * u.kpc
g1 = Galactic(l=l, b=b, distance=d)
g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1),
distance=d.reshape(100, 1, 1))
g1t = g1.transform_to(Galactocentric)
g2t = g2.transform_to(Galactocentric)
np.testing.assert_almost_equal(g1t.cartesian.xyz.value,
g2t.cartesian.xyz.value[:, :, 0, 0])
def test_supergalactic():
"""
Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
"""
# Check supergalactic North pole.
npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
assert allclose(npole.transform_to(Supergalactic).sgb.deg, +90, atol=1e-9)
# Check the origin of supergalactic longitude.
lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree)
lon0_gal = lon0.transform_to(Galactic)
assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9)
assert allclose(lon0_gal.b.deg, 0, atol=1e-9)
# Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
# (http://adsabs.harvard.edu/abs/2008A%26A...484..143F)
# GRB 021219
supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree)
icrs = SkyCoord('18h50m27s +31d57m17s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
# GRB 030320
supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree)
icrs = SkyCoord('17h51m36s -25d18m52s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
class TestHCRS():
"""
Check HCRS<->ICRS coordinate conversions.
Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
`tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
"""
def setup(self):
self.t1 = Time("2013-02-02T23:00")
self.t2 = Time("2013-08-02T23:00")
self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg,
dec=-22.36943723*u.deg,
distance=406615.66347377*u.km)
# array of positions corresponds to times in `tarr`
self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg,
dec=[-22.36943605, -25.07431079]*u.deg,
distance=[406615.66347377, 375484.13558956]*u.km)
# corresponding HCRS positions
self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km),
obstime=self.t1)
twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
self.tolerance = 5*u.km
def test_from_hcrs(self):
# test scalar transform
transformed = self.sun_hcrs_t1.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_scalar)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# test non-scalar positions and times
transformed = self.sun_hcrs_tarr.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_arr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
def test_from_icrs(self):
# scalar positions
transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
separation = transformed.separation_3d(self.sun_hcrs_t1)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# nonscalar positions
transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
separation = transformed.separation_3d(self.sun_hcrs_tarr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
class TestHelioBaryCentric():
"""
Check GCRS<->Heliocentric and Barycentric coordinate conversions.
Uses the WHT observing site (information grabbed from data/sites.json).
"""
def setup(self):
wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m)
self.obstime = Time("2013-02-02T23:00")
self.wht_itrs = wht.get_itrs(obstime=self.obstime)
def test_heliocentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
helio = gcrs.transform_to(HCRS(obstime=self.obstime))
# Check it doesn't change from previous times.
previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m
assert_allclose(helio.cartesian.xyz, previous)
# And that it agrees with SLALIB to within 14km
helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
assert np.sqrt(((helio.cartesian.xyz -
helio_slalib)**2).sum()) < 14. * u.km
def test_barycentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
bary = gcrs.transform_to(ICRS())
previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m
assert_allclose(bary.cartesian.xyz, previous)
# And that it agrees with SLALIB answer to within 14km
bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
assert np.sqrt(((bary.cartesian.xyz -
bary_slalib)**2).sum()) < 14. * u.km
def test_lsr_sanity():
# random numbers, but zero velocity in ICRS frame
icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
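# a source at rest in ICRS should appear to move with +v_bary in the LSR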
lsr = icrs.transform_to(LSR)
lsr_diff = lsr.data.differentials['s']
cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data)
lsr_vel = ICRS(cart_lsr_vel)
gal_lsr = lsr_vel.transform_to(Galactic).cartesian.xyz
assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()),
lsr.v_bary.d_xyz)
# moving with LSR velocity
lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
icrs = lsr.transform_to(ICRS)
icrs_diff = icrs.data.differentials['s']
cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data)
vel = ICRS(cart_vel)
gal_icrs = vel.transform_to(Galactic).cartesian.xyz
assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()),
-lsr.v_bary.d_xyz)